(boundedqueue): new semaphore implementation (#174)
This PR adds a new semaphore object that limits both the number of bytes and the number of waiters. This should help with some memory issues in the collector where the receiver holds on to too much memory, either due to large/frequent requests or too many blocked waiters. Related to #173
1 parent 14c63d1 · commit f4dcef6
Showing 6 changed files with 374 additions and 0 deletions.
@@ -0,0 +1,20 @@
# Admission Package

## Overview

The admission package provides a BoundedQueue object, a semaphore implementation that limits the number of bytes admitted into a collector pipeline. The BoundedQueue also limits the number of waiters that can block on a call to `bq.Acquire(sz int64)`.

This package is an experiment to improve the behavior of Collector pipelines whose `exporterhelper` is configured to apply backpressure. It is meant to be used in receivers, via an interceptor or custom logic. The BoundedQueue helps limit memory within the entire collector pipeline by bounding the two dimensions that cause memory issues:
1. bytes: large requests that enter the collector pipeline can require large allocations, even if downstream components will eventually limit or rate-limit the request.
2. waiters: limiting on bytes alone is not enough, because requests that enter the pipeline and block on `bq.Acquire()` still consume memory within the receiver. With enough waiters this can be a significant contribution to memory usage.
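
As a rough illustration (the numbers here are hypothetical, not defaults of this package): with `maxLimitBytes` of 32 MiB, `maxLimitWaiters` of 100, and requests of at most 4 MiB, the receiver holds at most about 32 MiB of admitted request data plus 100 × 4 MiB = 400 MiB held by blocked waiters, so worst-case request memory stays near 432 MiB instead of growing without bound.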

## Usage

Create a new BoundedQueue by calling `bq := admission.NewBoundedQueue(maxLimitBytes, maxLimitWaiters)`.

Within the component, call `bq.Acquire(ctx, requestSize)`, which will either:
1. succeed immediately if there is enough available memory,
2. fail immediately if there are too many waiters, or
3. block until context cancelation or until enough bytes become available.

Once a request has finished processing and has been sent downstream, call `bq.Release(requestSize)` to allow waiters to be admitted for processing. Release should only fail if more bytes are released than were previously acquired.
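
A minimal sketch of how a receiver-side handler might use the BoundedQueue; the import path, handler shape, and limits shown here are illustrative assumptions, not part of this PR:

```go
package main

import (
	"context"
	"fmt"

	// Hypothetical import path; adjust to wherever the admission package lives.
	"github.com/example/collector/internal/admission"
)

// handleRequest admits the payload before doing any expensive work and
// releases its bytes once the request has been handed off downstream.
func handleRequest(ctx context.Context, bq *admission.BoundedQueue, payload []byte) error {
	size := int64(len(payload))

	// Blocks until enough bytes are free, fails fast with ErrTooManyWaiters
	// when the waiter limit is reached, or returns when ctx is canceled.
	if err := bq.Acquire(ctx, size); err != nil {
		return fmt.Errorf("request not admitted: %w", err)
	}
	defer func() {
		// Give the bytes back so blocked waiters can be admitted.
		if err := bq.Release(size); err != nil {
			fmt.Println("release failed:", err)
		}
	}()

	// ... decode the payload and forward it into the pipeline ...
	return nil
}

func main() {
	// Hypothetical limits: 32 MiB of in-flight request bytes, 100 blocked waiters.
	bq := admission.NewBoundedQueue(32*1024*1024, 100)
	_ = handleRequest(context.Background(), bq, []byte("example payload"))
}
```

Keeping the Release call in a defer ensures the acquired bytes are returned even when downstream processing fails.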
@@ -0,0 +1,150 @@
package admission

import (
    "context"
    "fmt"
    "sync"

    "github.com/google/uuid"
    orderedmap "github.com/wk8/go-ordered-map/v2"
)

// ErrTooManyWaiters is returned by Acquire when the configured limit on
// blocked waiters has already been reached.
var ErrTooManyWaiters = fmt.Errorf("rejecting request, too many waiters")

// BoundedQueue is a semaphore that limits both the total number of bytes
// admitted into the pipeline and the number of callers blocked in Acquire.
type BoundedQueue struct {
    maxLimitBytes   int64
    maxLimitWaiters int64
    currentBytes    int64
    currentWaiters  int64
    lock            sync.Mutex
    waiters         *orderedmap.OrderedMap[uuid.UUID, waiter]
}

type waiter struct {
    readyCh      chan struct{}
    pendingBytes int64
    ID           uuid.UUID
}

// NewBoundedQueue returns a BoundedQueue with the given byte and waiter limits.
func NewBoundedQueue(maxLimitBytes, maxLimitWaiters int64) *BoundedQueue {
    return &BoundedQueue{
        maxLimitBytes:   maxLimitBytes,
        maxLimitWaiters: maxLimitWaiters,
        waiters:         orderedmap.New[uuid.UUID, waiter](),
    }
}

// admit attempts to reserve pendingBytes immediately. It returns (true, nil)
// on success, (false, nil) when the caller must wait (the waiter count is
// incremented), and an error when the request cannot be admitted at all.
func (bq *BoundedQueue) admit(pendingBytes int64) (bool, error) {
    bq.lock.Lock()
    defer bq.lock.Unlock()

    if pendingBytes > bq.maxLimitBytes { // will never succeed
        return false, fmt.Errorf("rejecting request, request size larger than configured limit")
    }

    if bq.currentBytes+pendingBytes <= bq.maxLimitBytes { // no need to wait to admit
        bq.currentBytes += pendingBytes
        return true, nil
    }

    // since we were unable to admit, check if we can wait.
    if bq.currentWaiters+1 > bq.maxLimitWaiters { // too many waiters
        return false, ErrTooManyWaiters
    }

    // if we got to this point we need to wait to acquire bytes, so update currentWaiters before releasing mutex.
    bq.currentWaiters += 1
    return false, nil
}

// Acquire reserves pendingBytes, blocking until enough bytes are released or
// ctx is canceled. It fails immediately if pendingBytes exceeds the byte
// limit or the waiter limit has been reached.
func (bq *BoundedQueue) Acquire(ctx context.Context, pendingBytes int64) error {
    success, err := bq.admit(pendingBytes)
    if err != nil || success {
        return err
    }

    // otherwise we need to wait for bytes to be released
    curWaiter := waiter{
        pendingBytes: pendingBytes,
        readyCh:      make(chan struct{}),
    }

    bq.lock.Lock()

    // generate unique key
    for {
        id := uuid.New()
        _, keyExists := bq.waiters.Get(id)
        if keyExists {
            continue
        }
        bq.waiters.Set(id, curWaiter)
        curWaiter.ID = id
        break
    }

    bq.lock.Unlock()

    select {
    case <-curWaiter.readyCh:
        return nil
    case <-ctx.Done():
        // canceled before acquired so remove waiter.
        bq.lock.Lock()
        defer bq.lock.Unlock()
        err = fmt.Errorf("context canceled: %w", ctx.Err())

        _, found := bq.waiters.Delete(curWaiter.ID)
        if !found {
            // the waiter was already admitted by a concurrent Release.
            return err
        }

        bq.currentWaiters -= 1
        return err
    }
}

// Release returns pendingBytes to the queue and admits as many blocked
// waiters as now fit, in FIFO order.
func (bq *BoundedQueue) Release(pendingBytes int64) error {
    bq.lock.Lock()
    defer bq.lock.Unlock()

    bq.currentBytes -= pendingBytes

    if bq.currentBytes < 0 {
        return fmt.Errorf("released more bytes than acquired")
    }

    for {
        if bq.waiters.Len() == 0 {
            return nil
        }
        next := bq.waiters.Oldest()
        nextWaiter := next.Value
        nextKey := next.Key
        if bq.currentBytes+nextWaiter.pendingBytes <= bq.maxLimitBytes {
            bq.currentBytes += nextWaiter.pendingBytes
            bq.currentWaiters -= 1
            close(nextWaiter.readyCh)
            _, found := bq.waiters.Delete(nextKey)
            if !found {
                return fmt.Errorf("deleting waiter that doesn't exist")
            }
            continue
        } else {
            break
        }
    }

    return nil
}

// TryAcquire reserves pendingBytes without blocking and reports whether the
// reservation succeeded.
func (bq *BoundedQueue) TryAcquire(pendingBytes int64) bool {
    bq.lock.Lock()
    defer bq.lock.Unlock()
    if bq.currentBytes+pendingBytes <= bq.maxLimitBytes {
        bq.currentBytes += pendingBytes
        return true
    }
    return false
}
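
The file above also adds a non-blocking `TryAcquire`, which the README does not document. A short sketch of how it might be used to shed load instead of blocking; the package name and import path are hypothetical:

```go
package admissionexample // hypothetical example package

import (
	"errors"
	"fmt"

	// Hypothetical import path; adjust to wherever the admission package lives.
	"github.com/example/collector/internal/admission"
)

var errOverloaded = errors.New("queue full, dropping request")

// tryHandle rejects a request immediately when the byte limit is reached,
// instead of registering a waiter and blocking.
func tryHandle(bq *admission.BoundedQueue, payload []byte) error {
	size := int64(len(payload))
	if !bq.TryAcquire(size) {
		return errOverloaded
	}
	defer func() { _ = bq.Release(size) }()

	fmt.Println("admitted", size, "bytes")
	// ... process the payload ...
	return nil
}
```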
@@ -0,0 +1,186 @@
package admission

import (
    "context"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.uber.org/multierr"
)

func min(x, y int64) int64 {
    if x <= y {
        return x
    }
    return y
}

func max(x, y int64) int64 {
    if x >= y {
        return x
    }
    return y
}

func abs(x int64) int64 {
    if x < 0 {
        return -x
    }
    return x
}

func TestAcquireSimpleNoWaiters(t *testing.T) {
    maxLimitBytes := 1000
    maxLimitWaiters := 10
    numRequests := 40
    requestSize := 21

    bq := NewBoundedQueue(int64(maxLimitBytes), int64(maxLimitWaiters))

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    for i := 0; i < numRequests; i++ {
        go func() {
            err := bq.Acquire(ctx, int64(requestSize))
            assert.NoError(t, err)
        }()
    }

    require.Never(t, func() bool {
        return bq.waiters.Len() > 0
    }, 2*time.Second, 10*time.Millisecond)

    for i := 0; i < numRequests; i++ {
        assert.NoError(t, bq.Release(int64(requestSize)))
        assert.Equal(t, int64(0), bq.currentWaiters)
    }

    assert.ErrorContains(t, bq.Release(int64(1)), "released more bytes than acquired")
    assert.NoError(t, bq.Acquire(ctx, int64(maxLimitBytes)))
}

func TestAcquireBoundedWithWaiters(t *testing.T) {
    tests := []struct {
        name            string
        maxLimitBytes   int64
        maxLimitWaiters int64
        numRequests     int64
        requestSize     int64
        timeout         time.Duration
    }{
        {
            name:            "below max waiters above max bytes",
            maxLimitBytes:   1000,
            maxLimitWaiters: 100,
            numRequests:     100,
            requestSize:     21,
            timeout:         5 * time.Second,
        },
        {
            name:            "above max waiters above max bytes",
            maxLimitBytes:   1000,
            maxLimitWaiters: 100,
            numRequests:     200,
            requestSize:     21,
            timeout:         5 * time.Second,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            bq := NewBoundedQueue(tt.maxLimitBytes, tt.maxLimitWaiters)
            var blockedRequests int64
            numReqsUntilBlocked := tt.maxLimitBytes / tt.requestSize
            requestsAboveLimit := abs(tt.numRequests - numReqsUntilBlocked)
            tooManyWaiters := requestsAboveLimit > tt.maxLimitWaiters
            numRejected := max(requestsAboveLimit-tt.maxLimitWaiters, int64(0))

            // There should never be more blocked requests than maxLimitWaiters.
            blockedRequests = min(tt.maxLimitWaiters, requestsAboveLimit)

            ctx, cancel := context.WithTimeout(context.Background(), tt.timeout)
            defer cancel()
            var errs error
            for i := 0; i < int(tt.numRequests); i++ {
                go func() {
                    err := bq.Acquire(ctx, tt.requestSize)
                    bq.lock.Lock()
                    defer bq.lock.Unlock()
                    errs = multierr.Append(errs, err)
                }()
            }

            require.Eventually(t, func() bool {
                bq.lock.Lock()
                defer bq.lock.Unlock()
                return bq.waiters.Len() == int(blockedRequests)
            }, 3*time.Second, 10*time.Millisecond)

            assert.NoError(t, bq.Release(tt.requestSize))
            assert.Equal(t, bq.waiters.Len(), int(blockedRequests)-1)

            for i := 0; i < int(tt.numRequests-numRejected)-1; i++ {
                assert.NoError(t, bq.Release(tt.requestSize))
            }

            bq.lock.Lock()
            if tooManyWaiters {
                assert.ErrorContains(t, errs, ErrTooManyWaiters.Error())
            } else {
                assert.NoError(t, errs)
            }
            bq.lock.Unlock()

            // confirm all bytes were released by acquiring maxLimitBytes.
            assert.True(t, bq.TryAcquire(tt.maxLimitBytes))
        })
    }
}

func TestAcquireContextCanceled(t *testing.T) {
    maxLimitBytes := 1000
    maxLimitWaiters := 100
    numRequests := 100
    requestSize := 21
    numReqsUntilBlocked := maxLimitBytes / requestSize
    requestsAboveLimit := abs(int64(numRequests) - int64(numReqsUntilBlocked))

    blockedRequests := min(int64(maxLimitWaiters), int64(requestsAboveLimit))

    bq := NewBoundedQueue(int64(maxLimitBytes), int64(maxLimitWaiters))

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    var errs error
    var wg sync.WaitGroup
    for i := 0; i < numRequests; i++ {
        wg.Add(1)
        go func() {
            err := bq.Acquire(ctx, int64(requestSize))
            bq.lock.Lock()
            defer bq.lock.Unlock()
            errs = multierr.Append(errs, err)
            wg.Done()
        }()
    }

    // Wait until all calls to Acquire() happen and we have the expected number of waiters.
    require.Eventually(t, func() bool {
        bq.lock.Lock()
        defer bq.lock.Unlock()
        return bq.waiters.Len() == int(blockedRequests)
    }, 3*time.Second, 10*time.Millisecond)

    cancel()
    wg.Wait()
    assert.ErrorContains(t, errs, "context canceled")

    // Now all waiters should have returned and been removed.
    assert.Equal(t, 0, bq.waiters.Len())

    for i := 0; i < numReqsUntilBlocked; i++ {
        assert.NoError(t, bq.Release(int64(requestSize)))
        assert.Equal(t, int64(0), bq.currentWaiters)
    }
    assert.True(t, bq.TryAcquire(int64(maxLimitBytes)))
}