Skip to content

Implement a bucket per stream to prevent head-of-line blocking (HOLB) #31

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions frame.go
Original file line number Diff line number Diff line change
@@ -13,6 +13,7 @@ const ( // cmds
cmdSYN byte = iota // stream open
cmdFIN // stream close, a.k.a EOF mark
cmdPSH // data push
cmdACK // data ack
cmdNOP // no operation
)

14 changes: 7 additions & 7 deletions mux.go
Original file line number Diff line number Diff line change
@@ -21,18 +21,18 @@ type Config struct {
// frame size to sent to the remote
MaxFrameSize int

// MaxReceiveBuffer is used to control the maximum
// MaxPerStreamReceiveBuffer is used to control the maximum
// number of data in the buffer pool
MaxReceiveBuffer int
MaxPerStreamReceiveBuffer int
}

// DefaultConfig is used to return a default configuration
func DefaultConfig() *Config {
return &Config{
KeepAliveInterval: 10 * time.Second,
KeepAliveTimeout: 30 * time.Second,
MaxFrameSize: 4096,
MaxReceiveBuffer: 4194304,
KeepAliveInterval: 10 * time.Second,
KeepAliveTimeout: 30 * time.Second,
MaxFrameSize: 4096,
MaxPerStreamReceiveBuffer: 4194304,
}
}

@@ -50,7 +50,7 @@ func VerifyConfig(config *Config) error {
if config.MaxFrameSize > 65535 {
return errors.New("max frame size must not be larger than 65535")
}
if config.MaxReceiveBuffer <= 0 {
if config.MaxPerStreamReceiveBuffer <= 0 {
return errors.New("max receive buffer must be positive")
}
return nil
2 changes: 1 addition & 1 deletion mux_test.go
Original file line number Diff line number Diff line change
@@ -51,7 +51,7 @@ func TestConfig(t *testing.T) {
}

config = DefaultConfig()
config.MaxReceiveBuffer = 0
config.MaxPerStreamReceiveBuffer = 0
err = VerifyConfig(config)
t.Log(err)
if err == nil {
134 changes: 86 additions & 48 deletions session.go
Original file line number Diff line number Diff line change
@@ -8,6 +8,7 @@ import (
"time"

"github.com/pkg/errors"
"container/heap"
)

const (
@@ -21,15 +22,42 @@ const (
)

type writeRequest struct {
frame Frame
result chan writeResult
niceness uint8
sequence uint64 // Used to keep the heap ordered by time
frame Frame
result chan writeResult
}

type writeResult struct {
n int
err error
}

// writeHeap is a priority queue of pending writeRequests for the send loop.
// Lower niceness values are served first; ties are broken by sequence
// number so requests of equal niceness keep FIFO order.
type writeHeap []writeRequest

// Len reports the number of queued requests.
func (h writeHeap) Len() int { return len(h) }

// Less orders requests primarily by niceness (lower wins) and, within the
// same niceness, by submission sequence (earlier wins).
func (h writeHeap) Less(i, j int) bool {
	a, b := h[i], h[j]
	if a.niceness != b.niceness {
		return a.niceness < b.niceness
	}
	return a.sequence < b.sequence
}

// Swap exchanges two queued requests.
func (h writeHeap) Swap(i, j int) { h[j], h[i] = h[i], h[j] }

// Push appends x to the queue. Pointer receiver because the slice's
// length (not just its contents) is modified.
func (h *writeHeap) Push(x interface{}) {
	*h = append(*h, x.(writeRequest))
}

// Pop removes and returns the final element, which container/heap has
// already swapped into place. Pointer receiver for the same reason as Push.
func (h *writeHeap) Pop() interface{} {
	old := *h
	last := len(old) - 1
	item := old[last]
	*h = old[:last]
	return item
}

// Session defines a multiplexed connection for streams
type Session struct {
conn io.ReadWriteCloser
@@ -38,9 +66,6 @@ type Session struct {
nextStreamID uint32 // next stream identifier
nextStreamIDLock sync.Mutex

bucket int32 // token bucket
bucketNotify chan struct{} // used for waiting for tokens

streams map[uint32]*Stream // all streams in this session
streamLock sync.Mutex // locks streams

@@ -54,7 +79,10 @@ type Session struct {

deadline atomic.Value

writes chan writeRequest
writeTicket chan struct{}
writesLock sync.Mutex
writes writeHeap
writeSequenceNum uint64
}

func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
@@ -64,9 +92,7 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
s.config = config
s.streams = make(map[uint32]*Stream)
s.chAccepts = make(chan *Stream, defaultAcceptBacklog)
s.bucket = int32(config.MaxReceiveBuffer)
s.bucketNotify = make(chan struct{}, 1)
s.writes = make(chan writeRequest)
s.writeTicket = make(chan struct{})

if client {
s.nextStreamID = 1
@@ -79,8 +105,12 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
return s
}

// OpenStream is used to create a new stream
func (s *Session) OpenStream() (*Stream, error) {
return s.OpenStreamOpt(100)
}

// OpenStreamOpt is used to create a new stream with the given niceness
// (lower values are sent with higher priority).
func (s *Session) OpenStreamOpt(niceness uint8) (*Stream, error) {
if s.IsClosed() {
return nil, errors.New(errBrokenPipe)
}
@@ -101,9 +131,9 @@ func (s *Session) OpenStream() (*Stream, error) {
}
s.nextStreamIDLock.Unlock()

stream := newStream(sid, s.config.MaxFrameSize, s)
stream := newStream(sid, niceness, s.config.MaxFrameSize, int32(s.config.MaxPerStreamReceiveBuffer), s)

if _, err := s.writeFrame(newFrame(cmdSYN, sid)); err != nil {
if _, err := s.writeFrame(0, newFrame(cmdSYN, sid)); err != nil {
return nil, errors.Wrap(err, "writeFrame")
}

@@ -113,9 +143,13 @@ func (s *Session) OpenStream() (*Stream, error) {
return stream, nil
}

func (s *Session) AcceptStream() (*Stream, error) {
return s.AcceptStreamOpt(100)
}

// AcceptStreamOpt is used to block until the next available stream
// is ready to be accepted, assigning it the given niceness.
func (s *Session) AcceptStream() (*Stream, error) {
func (s *Session) AcceptStreamOpt(niceness uint8) (*Stream, error) {
var deadline <-chan time.Time
if d, ok := s.deadline.Load().(time.Time); ok && !d.IsZero() {
timer := time.NewTimer(time.Until(d))
@@ -124,6 +158,7 @@ func (s *Session) AcceptStream() (*Stream, error) {
}
select {
case stream := <-s.chAccepts:
stream.niceness = niceness
return stream, nil
case <-deadline:
return nil, errTimeout
@@ -148,19 +183,10 @@ func (s *Session) Close() (err error) {
s.streams[k].sessionClose()
}
s.streamLock.Unlock()
s.notifyBucket()
return s.conn.Close()
}
}

// notifyBucket notifies recvLoop that bucket is available
func (s *Session) notifyBucket() {
select {
case s.bucketNotify <- struct{}{}:
default:
}
}

// IsClosed does a safe check to see if we have shutdown
func (s *Session) IsClosed() bool {
select {
@@ -191,20 +217,15 @@ func (s *Session) SetDeadline(t time.Time) error {
// notify the session that a stream has closed
func (s *Session) streamClosed(sid uint32) {
s.streamLock.Lock()
if n := s.streams[sid].recycleTokens(); n > 0 { // return remaining tokens to the bucket
if atomic.AddInt32(&s.bucket, int32(n)) > 0 {
s.notifyBucket()
}
}
delete(s.streams, sid)
s.streamLock.Unlock()
}

// returnTokens is called by stream to return token after read
func (s *Session) returnTokens(n int) {
if atomic.AddInt32(&s.bucket, int32(n)) > 0 {
s.notifyBucket()
}
// queueAcks enqueues a highest-priority (niceness 0) ACK frame for
// stream streamId, telling the remote peer that n bytes of per-stream
// receive-buffer tokens have been freed.
func (s *Session) queueAcks(streamId uint32, n int32) {
	payload := make([]byte, 4)
	binary.BigEndian.PutUint32(payload, uint32(n))
	ack := newFrame(cmdACK, streamId)
	ack.data = payload
	s.queueFrame(0, ack)
}

// session read a frame from underlying connection
@@ -235,10 +256,6 @@ func (s *Session) readFrame(buffer []byte) (f Frame, err error) {
func (s *Session) recvLoop() {
buffer := make([]byte, (1<<16)+headerSize)
for {
for atomic.LoadInt32(&s.bucket) <= 0 && !s.IsClosed() {
<-s.bucketNotify
}

if f, err := s.readFrame(buffer); err == nil {
atomic.StoreInt32(&s.dataReady, 1)

@@ -247,7 +264,7 @@ func (s *Session) recvLoop() {
case cmdSYN:
s.streamLock.Lock()
if _, ok := s.streams[f.sid]; !ok {
stream := newStream(f.sid, s.config.MaxFrameSize, s)
stream := newStream(f.sid, 255, s.config.MaxFrameSize, int32(s.config.MaxPerStreamReceiveBuffer), s)
s.streams[f.sid] = stream
select {
case s.chAccepts <- stream:
@@ -265,11 +282,17 @@ func (s *Session) recvLoop() {
case cmdPSH:
s.streamLock.Lock()
if stream, ok := s.streams[f.sid]; ok {
atomic.AddInt32(&s.bucket, -int32(len(f.data)))
stream.pushBytes(f.data)
stream.notifyReadEvent()
}
s.streamLock.Unlock()
case cmdACK:
s.streamLock.Lock()
if stream, ok := s.streams[f.sid]; ok {
tokens := binary.BigEndian.Uint32(f.data)
stream.receiveAck(int32(tokens))
}
s.streamLock.Unlock()
default:
s.Close()
return
@@ -289,8 +312,7 @@ func (s *Session) keepalive() {
for {
select {
case <-tickerPing.C:
s.writeFrame(newFrame(cmdNOP, 0))
s.notifyBucket() // force a signal to the recvLoop
s.writeFrame(0, newFrame(cmdNOP, 0))
case <-tickerTimeout.C:
if !atomic.CompareAndSwapInt32(&s.dataReady, 1, 0) {
s.Close()
@@ -308,7 +330,11 @@ func (s *Session) sendLoop() {
select {
case <-s.die:
return
case request := <-s.writes:
case <-s.writeTicket:
s.writesLock.Lock()
request := heap.Pop(&s.writes).(writeRequest)
s.writesLock.Unlock()

buf[0] = request.frame.ver
buf[1] = request.frame.cmd
binary.LittleEndian.PutUint16(buf[2:], uint16(len(request.frame.data)))
@@ -332,19 +358,31 @@ func (s *Session) sendLoop() {
}
}

// writeFrame writes the frame to the underlying connection
// and returns the number of bytes written if successful
func (s *Session) writeFrame(f Frame) (n int, err error) {
func (s *Session) queueFrame(niceness uint8, f Frame) (writeRequest, error) {
req := writeRequest{
frame: f,
result: make(chan writeResult, 1),
niceness: niceness,
sequence: atomic.AddUint64(&s.writeSequenceNum, 1),
frame: f,
result: make(chan writeResult, 1),
}
s.writesLock.Lock()
heap.Push(&s.writes, req)
s.writesLock.Unlock()
select {
case <-s.die:
return 0, errors.New(errBrokenPipe)
case s.writes <- req:
return req, errors.New(errBrokenPipe)
case s.writeTicket <- struct{}{}:
}
return req, nil
}

// writeFrame enqueues frame f at the given niceness, blocks until the
// send loop has attempted the write, and returns the number of bytes
// written (or the write error).
func (s *Session) writeFrame(niceness uint8, f Frame) (n int, err error) {
	req, qerr := s.queueFrame(niceness, f)
	if qerr != nil {
		return 0, qerr
	}
	res := <-req.result
	return res.n, res.err
}
Loading