=e-1){var s=u[n];return s.x0=i,s.y0=o,s.x1=a,void(s.y1=c)}var l=f[n],h=r/2+l,d=n+1,p=e-1;for(;d>>1;f[g]c-o){var _=r?(i*v+a*y)/r:a;t(n,d,y,i,o,_,c),t(d,e,v,_,o,a,c)}else{var b=r?(o*v+c*y)/r:c;t(n,d,y,i,o,a,b),t(d,e,v,i,b,a,c)}}(0,c,t.value,n,e,r,i)},t.treemapDice=Ap,t.treemapResquarify=Lp,t.treemapSlice=Ip,t.treemapSliceDice=function(t,n,e,r,i){(1&t.depth?Ip:Ap)(t,n,e,r,i)},t.treemapSquarify=Yp,t.tsv=Mc,t.tsvFormat=lc,t.tsvFormatBody=hc,t.tsvFormatRow=pc,t.tsvFormatRows=dc,t.tsvFormatValue=gc,t.tsvParse=fc,t.tsvParseRows=sc,t.union=function(...t){const n=new InternSet;for(const e of t)for(const t of e)n.add(t);return n},t.unixDay=_y,t.unixDays=by,t.utcDay=yy,t.utcDays=vy,t.utcFriday=By,t.utcFridays=Vy,t.utcHour=hy,t.utcHours=dy,t.utcMillisecond=Wg,t.utcMilliseconds=Zg,t.utcMinute=cy,t.utcMinutes=fy,t.utcMonday=qy,t.utcMondays=jy,t.utcMonth=Qy,t.utcMonths=Jy,t.utcSaturday=Yy,t.utcSaturdays=Wy,t.utcSecond=iy,t.utcSeconds=oy,t.utcSunday=Fy,t.utcSundays=Ly,t.utcThursday=Oy,t.utcThursdays=Gy,t.utcTickInterval=av,t.utcTicks=ov,t.utcTuesday=Uy,t.utcTuesdays=Hy,t.utcWednesday=Iy,t.utcWednesdays=Xy,t.utcWeek=Fy,t.utcWeeks=Ly,t.utcYear=ev,t.utcYears=rv,t.variance=x,t.version="7.9.0",t.window=pn,t.xml=Sc,t.zip=function(){return gt(arguments)},t.zoom=function(){var t,n,e,r=Ew,i=Nw,o=zw,a=Cw,u=Pw,c=[0,1/0],f=[[-1/0,-1/0],[1/0,1/0]],s=250,l=ri,h=$t("start","zoom","end"),d=500,p=150,g=0,y=10;function v(t){t.property("__zoom",kw).on("wheel.zoom",T,{passive:!1}).on("mousedown.zoom",A).on("dblclick.zoom",S).filter(u).on("touchstart.zoom",E).on("touchmove.zoom",N).on("touchend.zoom touchcancel.zoom",k).style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function _(t,n){return(n=Math.max(c[0],Math.min(c[1],n)))===t.k?t:new ww(n,t.x,t.y)}function b(t,n,e){var r=n[0]-e[0]*t.k,i=n[1]-e[1]*t.k;return r===t.x&&i===t.y?t:new ww(t.k,r,i)}function m(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function 
x(t,n,e,r){t.on("start.zoom",(function(){w(this,arguments).event(r).start()})).on("interrupt.zoom end.zoom",(function(){w(this,arguments).event(r).end()})).tween("zoom",(function(){var t=this,o=arguments,a=w(t,o).event(r),u=i.apply(t,o),c=null==e?m(u):"function"==typeof e?e.apply(t,o):e,f=Math.max(u[1][0]-u[0][0],u[1][1]-u[0][1]),s=t.__zoom,h="function"==typeof n?n.apply(t,o):n,d=l(s.invert(c).concat(f/s.k),h.invert(c).concat(f/h.k));return function(t){if(1===t)t=h;else{var n=d(t),e=f/n[2];t=new ww(e,c[0]-n[0]*e,c[1]-n[1]*e)}a.zoom(null,t)}}))}function w(t,n,e){return!e&&t.__zooming||new M(t,n)}function M(t,n){this.that=t,this.args=n,this.active=0,this.sourceEvent=null,this.extent=i.apply(t,n),this.taps=0}function T(t,...n){if(r.apply(this,arguments)){var e=w(this,n).event(t),i=this.__zoom,u=Math.max(c[0],Math.min(c[1],i.k*Math.pow(2,a.apply(this,arguments)))),s=ne(t);if(e.wheel)e.mouse[0][0]===s[0]&&e.mouse[0][1]===s[1]||(e.mouse[1]=i.invert(e.mouse[0]=s)),clearTimeout(e.wheel);else{if(i.k===u)return;e.mouse=[s,i.invert(s)],Gi(this),e.start()}Sw(t),e.wheel=setTimeout((function(){e.wheel=null,e.end()}),p),e.zoom("mouse",o(b(_(i,u),e.mouse[0],e.mouse[1]),e.extent,f))}}function A(t,...n){if(!e&&r.apply(this,arguments)){var i=t.currentTarget,a=w(this,n,!0).event(t),u=Zn(t.view).on("mousemove.zoom",(function(t){if(Sw(t),!a.moved){var n=t.clientX-s,e=t.clientY-l;a.moved=n*n+e*e>g}a.event(t).zoom("mouse",o(b(a.that.__zoom,a.mouse[0]=ne(t,i),a.mouse[1]),a.extent,f))}),!0).on("mouseup.zoom",(function(t){u.on("mousemove.zoom mouseup.zoom",null),ue(t.view,a.moved),Sw(t),a.event(t).end()}),!0),c=ne(t,i),s=t.clientX,l=t.clientY;ae(t.view),Aw(t),a.mouse=[c,this.__zoom.invert(c)],Gi(this),a.start()}}function S(t,...n){if(r.apply(this,arguments)){var 
e=this.__zoom,a=ne(t.changedTouches?t.changedTouches[0]:t,this),u=e.invert(a),c=e.k*(t.shiftKey?.5:2),l=o(b(_(e,c),a,u),i.apply(this,n),f);Sw(t),s>0?Zn(this).transition().duration(s).call(x,l,a,t):Zn(this).call(v.transform,l,a,t)}}function E(e,...i){if(r.apply(this,arguments)){var o,a,u,c,f=e.touches,s=f.length,l=w(this,i,e.changedTouches.length===s).event(e);for(Aw(e),a=0;a
+
+
+
+
+
+
+ Speedometer Visualization
+
+
+
+
+
+
+
diff --git a/_examples/recovery_mode_cache/main.go b/_examples/recovery_mode_cache/main.go
new file mode 100644
index 00000000..512658b2
--- /dev/null
+++ b/_examples/recovery_mode_cache/main.go
@@ -0,0 +1,166 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "strconv"
+ "syscall"
+ "time"
+
+ _ "net/http/pprof"
+
+ "github.com/centrifugal/centrifuge"
+)
+
+var port = flag.Int("port", 8000, "Port to bind app to")
+
+func handleLog(e centrifuge.LogEntry) {
+ log.Printf("%s: %v", e.Message, e.Fields)
+}
+
+func authMiddleware(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ newCtx := centrifuge.SetCredentials(ctx, &centrifuge.Credentials{
+ UserID: "",
+ })
+ r = r.WithContext(newCtx)
+ h.ServeHTTP(w, r)
+ })
+}
+
+func waitExitSignal(n *centrifuge.Node, s *http.Server) {
+ sigCh := make(chan os.Signal, 1)
+ done := make(chan bool, 1)
+ signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
+ go func() {
+ <-sigCh
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ _ = n.Shutdown(ctx)
+ _ = s.Shutdown(ctx)
+ done <- true
+ }()
+ <-done
+}
+
+const exampleChannel = "speed"
+
+// Check whether channel is allowed for subscribing. In real case permission
+// check will probably be more complex than in this example.
+func channelSubscribeAllowed(channel string) bool {
+ return channel == exampleChannel
+}
+
+func main() {
+ node, _ := centrifuge.New(centrifuge.Config{
+ LogLevel: centrifuge.LogLevelInfo,
+ LogHandler: handleLog,
+ HistoryMetaTTL: 24 * time.Hour,
+ })
+
+ node.OnConnect(func(client *centrifuge.Client) {
+ transport := client.Transport()
+ log.Printf("[user %s] connected via %s with protocol: %s", client.UserID(), transport.Name(), transport.Protocol())
+
+ client.OnSubscribe(func(e centrifuge.SubscribeEvent, cb centrifuge.SubscribeCallback) {
+ log.Printf("[user %s] subscribes on %s", client.UserID(), e.Channel)
+
+ if !channelSubscribeAllowed(e.Channel) {
+ cb(centrifuge.SubscribeReply{}, centrifuge.ErrorPermissionDenied)
+ return
+ }
+
+ cb(centrifuge.SubscribeReply{
+ Options: centrifuge.SubscribeOptions{
+ EnableRecovery: true,
+ RecoveryMode: centrifuge.RecoveryModeCache,
+ },
+ }, nil)
+ })
+
+ client.OnUnsubscribe(func(e centrifuge.UnsubscribeEvent) {
+ log.Printf("[user %s] unsubscribed from %s: %s", client.UserID(), e.Channel, e.Reason)
+ })
+
+ client.OnDisconnect(func(e centrifuge.DisconnectEvent) {
+ log.Printf("[user %s] disconnected: %s", client.UserID(), e.Reason)
+ })
+ })
+
+ if err := node.Run(); err != nil {
+ log.Fatal(err)
+ }
+
+ go func() {
+ const (
+ accelerationRate = 2.0 // Speed increment per 100 ms
+ brakingRate = 10.0 // Speed decrement per 100 ms
+ maxSpeed = 190.0
+ minSpeed = 50.0
+ )
+
+ speed := 0.0
+ increasing := true
+
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if increasing {
+ speed += accelerationRate
+ if speed >= maxSpeed {
+ increasing = false
+ }
+ } else {
+ speed -= brakingRate
+ if speed <= minSpeed {
+ increasing = true
+ }
+ }
+ _, err := node.Publish(
+ exampleChannel,
+ []byte(`{"speed": `+fmt.Sprint(speed)+`}`),
+ centrifuge.WithHistory(1, time.Minute),
+ )
+ if err != nil {
+ log.Printf("error publishing to personal channel: %s", err)
+ }
+ }
+ }
+ }()
+
+ mux := http.DefaultServeMux
+
+ websocketHandler := centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{
+ ReadBufferSize: 1024,
+ UseWriteBufferPool: true,
+ })
+ mux.Handle("/connection/websocket", authMiddleware(websocketHandler))
+ mux.Handle("/", http.FileServer(http.Dir("./")))
+
+ server := &http.Server{
+ Handler: mux,
+ Addr: "127.0.0.1:" + strconv.Itoa(*port),
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ }
+
+ log.Print("Starting server, visit http://localhost:8000")
+ go func() {
+ if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ log.Fatal(err)
+ }
+ }()
+
+ waitExitSignal(node, server)
+ log.Println("bye!")
+}
diff --git a/_examples/recovery_mode_cache/readme.md b/_examples/recovery_mode_cache/readme.md
new file mode 100644
index 00000000..3f2912df
--- /dev/null
+++ b/_examples/recovery_mode_cache/readme.md
@@ -0,0 +1,7 @@
+Demonstration on using cache recovery mode.
+
+```
+go run main.go
+```
+
+Then go to http://localhost:8000
diff --git a/broker.go b/broker.go
index 1f40f4c4..348d6ebe 100644
--- a/broker.go
+++ b/broker.go
@@ -35,7 +35,7 @@ type ClientInfo struct {
// BrokerEventHandler can handle messages received from PUB/SUB system.
type BrokerEventHandler interface {
// HandlePublication to handle received Publications.
- HandlePublication(ch string, pub *Publication, sp StreamPosition) error
+ HandlePublication(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error
// HandleJoin to handle received Join messages.
HandleJoin(ch string, info *ClientInfo) error
// HandleLeave to handle received Leave messages.
@@ -112,6 +112,8 @@ type PublishOptions struct {
// with second precision, so don't set something less than one second here. By default,
// Centrifuge uses 5 minutes as idempotent result TTL.
IdempotentResultTTL time.Duration
+ // UseDelta enables using delta encoding for the publication.
+ UseDelta bool
}
// Broker is responsible for PUB/SUB mechanics.
diff --git a/broker_memory.go b/broker_memory.go
index d2f01625..f2097ea5 100644
--- a/broker_memory.go
+++ b/broker_memory.go
@@ -3,6 +3,7 @@ package centrifuge
import (
"container/heap"
"context"
+ "fmt"
"sync"
"time"
@@ -104,8 +105,11 @@ func (b *MemoryBroker) Publish(ch string, data []byte, opts PublishOptions) (Str
Info: opts.ClientInfo,
Tags: opts.Tags,
}
+ var prevPub *Publication
if opts.HistorySize > 0 && opts.HistoryTTL > 0 {
- streamTop, err := b.historyHub.add(ch, pub, opts)
+ var err error
+ var streamTop StreamPosition
+ streamTop, prevPub, err = b.historyHub.add(ch, pub, opts)
if err != nil {
return StreamPosition{}, false, err
}
@@ -117,7 +121,7 @@ func (b *MemoryBroker) Publish(ch string, data []byte, opts PublishOptions) (Str
}
b.saveResultToCache(ch, opts.IdempotencyKey, streamTop, resultExpireSeconds)
}
- return streamTop, false, b.eventHandler.HandlePublication(ch, pub, streamTop)
+ return streamTop, false, b.eventHandler.HandlePublication(ch, pub, streamTop, prevPub)
}
streamPosition := StreamPosition{}
if opts.IdempotencyKey != "" {
@@ -127,7 +131,7 @@ func (b *MemoryBroker) Publish(ch string, data []byte, opts PublishOptions) (Str
}
b.saveResultToCache(ch, opts.IdempotencyKey, streamPosition, resultExpireSeconds)
}
- return streamPosition, false, b.eventHandler.HandlePublication(ch, pub, StreamPosition{})
+ return streamPosition, false, b.eventHandler.HandlePublication(ch, pub, StreamPosition{}, prevPub)
}
func (b *MemoryBroker) getResultFromCache(ch string, key string) (StreamPosition, bool) {
@@ -239,6 +243,10 @@ func newHistoryHub(historyMetaTTL time.Duration, closeCh chan struct{}) *history
}
}
+func (h *historyHub) close() {
+ close(h.closeCh)
+}
+
func (h *historyHub) runCleanups() {
go h.expireStreams()
go h.removeStreams()
@@ -324,10 +332,24 @@ func (h *historyHub) expireStreams() {
}
}
-func (h *historyHub) add(ch string, pub *Publication, opts PublishOptions) (StreamPosition, error) {
+func (h *historyHub) add(ch string, pub *Publication, opts PublishOptions) (StreamPosition, *Publication, error) {
h.Lock()
defer h.Unlock()
+ var prevPub *Publication // May be nil if there were no previous publications.
+ if opts.UseDelta {
+ pubs, _, err := h.getLocked(ch, HistoryOptions{Filter: HistoryFilter{
+ Limit: 1,
+ Reverse: true,
+ }, MetaTTL: opts.HistoryMetaTTL})
+ if err != nil {
+ return StreamPosition{}, nil, fmt.Errorf("error getting previous publication from stream: %w", err)
+ }
+ if len(pubs) > 0 {
+ prevPub = pubs[0]
+ }
+ }
+
var offset uint64
var epoch string
@@ -367,7 +389,7 @@ func (h *historyHub) add(ch string, pub *Publication, opts PublishOptions) (Stre
}
pub.Offset = offset
- return StreamPosition{Offset: offset, Epoch: epoch}, nil
+ return StreamPosition{Offset: offset, Epoch: epoch}, prevPub, nil
}
// Lock must be held outside.
@@ -390,7 +412,11 @@ func getPosition(stream *memstream.Stream) StreamPosition {
func (h *historyHub) get(ch string, opts HistoryOptions) ([]*Publication, StreamPosition, error) {
h.Lock()
defer h.Unlock()
+ return h.getLocked(ch, opts)
+}
+// Lock must be held outside.
+func (h *historyHub) getLocked(ch string, opts HistoryOptions) ([]*Publication, StreamPosition, error) {
filter := opts.Filter
historyMetaTTL := opts.MetaTTL
diff --git a/broker_memory_test.go b/broker_memory_test.go
index aff3a46f..5f4725f7 100644
--- a/broker_memory_test.go
+++ b/broker_memory_test.go
@@ -141,7 +141,7 @@ func TestMemoryBrokerPublishIdempotent(t *testing.T) {
numPubs := 0
e.eventHandler = &testBrokerEventHandler{
- HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
numPubs++
return nil
},
@@ -169,7 +169,7 @@ func TestMemoryBrokerPublishIdempotentWithHistory(t *testing.T) {
numPubs := 0
e.eventHandler = &testBrokerEventHandler{
- HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
numPubs++
return nil
},
@@ -229,10 +229,10 @@ func TestMemoryHistoryHub(t *testing.T) {
ch1 := "channel1"
ch2 := "channel2"
pub := newTestPublication()
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
- _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
- _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
hist, _, err := h.get(ch1, HistoryOptions{
Filter: HistoryFilter{
@@ -271,10 +271,10 @@ func TestMemoryHistoryHub(t *testing.T) {
require.Equal(t, 0, len(hist))
// test history messages limit
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 10, HistoryTTL: time.Second})
hist, _, err = h.get(ch1, HistoryOptions{
Filter: HistoryFilter{
Limit: -1,
@@ -291,8 +291,8 @@ func TestMemoryHistoryHub(t *testing.T) {
require.Equal(t, 1, len(hist))
// test history limit greater than history size
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
hist, _, err = h.get(ch1, HistoryOptions{
Filter: HistoryFilter{
Limit: 2,
@@ -312,10 +312,10 @@ func TestMemoryHistoryHubMetaTTL(t *testing.T) {
h.RLock()
require.Equal(t, int64(0), h.nextRemoveCheck)
h.RUnlock()
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
- _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
- _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
+ _, _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second})
h.RLock()
require.True(t, h.nextRemoveCheck > 0)
require.Equal(t, 2, len(h.streams))
@@ -350,10 +350,10 @@ func TestMemoryHistoryHubMetaTTLPerChannel(t *testing.T) {
h.RLock()
require.Equal(t, int64(0), h.nextRemoveCheck)
h.RUnlock()
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
- _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
- _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
- _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
+ _, _, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
+ _, _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
+ _, _, _ = h.add(ch2, pub, PublishOptions{HistorySize: 2, HistoryTTL: time.Second, HistoryMetaTTL: time.Second})
h.RLock()
require.True(t, h.nextRemoveCheck > 0)
require.Equal(t, 2, len(h.streams))
@@ -545,19 +545,33 @@ type recoverTest struct {
Sleep int
Limit int
Recovered bool
+ RecoveryMode RecoveryMode
}
var clientRecoverTests = []recoverTest{
- {"empty_stream", 10, 60, 0, 0, 0, 0, 0, true},
- {"from_position", 10, 60, 10, 8, 2, 0, 0, true},
- {"from_position_limited", 10, 60, 10, 5, 0, 0, 2, false},
- {"from_position_with_server_limit", 10, 60, 10, 5, 0, 0, 1, false},
- {"from_position_that_already_gone", 10, 60, 20, 8, 0, 0, 0, false},
- {"from_position_that_not_exist_yet", 10, 60, 20, 108, 0, 0, 0, false},
- {"same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true},
- {"empty_position_recover_expected", 10, 60, 4, 0, 4, 0, 0, true},
- {"from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false},
- {"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true},
+ {"empty_stream", 10, 60, 0, 0, 0, 0, 0, true, RecoveryModeStream},
+ {"from_position", 10, 60, 10, 8, 2, 0, 0, true, RecoveryModeStream},
+ {"from_position_limited", 10, 60, 10, 5, 0, 0, 2, false, RecoveryModeStream},
+ {"from_position_with_server_limit", 10, 60, 10, 5, 0, 0, 1, false, RecoveryModeStream},
+ {"from_position_that_already_gone", 10, 60, 20, 8, 0, 0, 0, false, RecoveryModeStream},
+ {"from_position_that_not_exist_yet", 10, 60, 20, 108, 0, 0, 0, false, RecoveryModeStream},
+ {"same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true, RecoveryModeStream},
+ {"empty_position_recover_expected", 10, 60, 4, 0, 4, 0, 0, true, RecoveryModeStream},
+ {"from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false, RecoveryModeStream},
+ {"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true, RecoveryModeStream},
+ {"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true, RecoveryModeStream},
+
+ {"cache_empty_stream", 10, 60, 0, 0, 0, 0, 0, false, RecoveryModeCache},
+ {"cache_from_position", 10, 60, 10, 8, 1, 0, 0, true, RecoveryModeCache},
+ {"cache_from_position_limited", 10, 60, 10, 5, 1, 0, 2, true, RecoveryModeCache},
+ {"cache_from_position_with_server_limit", 10, 60, 10, 5, 1, 0, 1, true, RecoveryModeCache},
+ {"cache_from_position_that_already_gone", 10, 60, 20, 8, 1, 0, 0, true, RecoveryModeCache},
+ {"cache_from_position_that_not_exist_yet", 10, 60, 20, 108, 1, 0, 0, true, RecoveryModeCache},
+ {"cache_same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true, RecoveryModeCache},
+ {"cache_empty_position_recover_expected", 10, 60, 4, 0, 1, 0, 0, true, RecoveryModeCache},
+ {"cache_from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false, RecoveryModeCache},
+ {"cache_from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true, RecoveryModeCache},
+ {"cache_from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true, RecoveryModeCache},
}
type recoverTestChannel struct {
@@ -576,7 +590,7 @@ func TestClientSubscribeRecover(t *testing.T) {
node.config.RecoveryMaxPublicationLimit = tt.Limit
node.OnConnect(func(client *Client) {
client.OnSubscribe(func(event SubscribeEvent, cb SubscribeCallback) {
- opts := SubscribeOptions{EnableRecovery: true}
+ opts := SubscribeOptions{EnableRecovery: true, RecoveryMode: tt.RecoveryMode}
cb(SubscribeReply{Options: opts}, nil)
})
})
@@ -615,8 +629,8 @@ func TestClientSubscribeRecover(t *testing.T) {
require.Nil(t, disconnect)
require.Nil(t, rwWrapper.replies[0].Error)
res := extractSubscribeResult(rwWrapper.replies)
- require.Equal(t, tt.NumRecovered, len(res.Publications))
require.Equal(t, tt.Recovered, res.Recovered)
+ require.Equal(t, tt.NumRecovered, len(res.Publications))
if len(res.Publications) > 1 {
require.True(t, res.Publications[0].Offset < res.Publications[1].Offset)
}
@@ -754,3 +768,19 @@ func BenchmarkMemoryBrokerHistoryIteration(b *testing.B) {
it.testHistoryIteration(b, e.node, startPosition)
}
}
+
+func TestMemoryHistoryHubPrevPub(t *testing.T) {
+ t.Parallel()
+ h := newHistoryHub(0, make(chan struct{}))
+ h.runCleanups()
+ h.RLock()
+ require.Equal(t, 0, len(h.streams))
+ h.RUnlock()
+ defer h.close()
+ ch1 := "channel1"
+ pub := newTestPublication()
+ _, prevPub, _ := h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second, UseDelta: true})
+ require.Nil(t, prevPub)
+ _, prevPub, _ = h.add(ch1, pub, PublishOptions{HistorySize: 1, HistoryTTL: time.Second, UseDelta: true})
+ require.NotNil(t, prevPub)
+}
diff --git a/broker_redis.go b/broker_redis.go
index 1457b816..6e3adb5b 100644
--- a/broker_redis.go
+++ b/broker_redis.go
@@ -710,6 +710,11 @@ func (b *RedisBroker) publish(s *shardWrapper, ch string, data []byte, opts Publ
script = b.addHistoryStreamScript
}
+ var useDelta string
+ if opts.UseDelta {
+ useDelta = "1"
+ }
+
replies, err := script.Exec(
context.Background(),
s.shard.client,
@@ -723,6 +728,7 @@ func (b *RedisBroker) publish(s *shardWrapper, ch string, data []byte, opts Publ
strconv.FormatInt(time.Now().Unix(), 10),
publishCommand,
resultExpire,
+ useDelta,
},
).ToArray()
if err != nil {
@@ -996,7 +1002,7 @@ var (
)
func (b *RedisBroker) handleRedisClientMessage(eventHandler BrokerEventHandler, chID channelID, data []byte) error {
- pushData, pushType, sp, ok := extractPushData(data)
+ pushData, pushType, sp, delta, prevPayload, ok := extractPushData(data)
if !ok {
return fmt.Errorf("malformed PUB/SUB data: %s", data)
}
@@ -1013,7 +1019,16 @@ func (b *RedisBroker) handleRedisClientMessage(eventHandler BrokerEventHandler,
// it to unmarshalled Publication.
pub.Offset = sp.Offset
}
- _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp)
+ if delta && len(prevPayload) > 0 {
+ var prevPub protocol.Publication
+ err = prevPub.UnmarshalVT(prevPayload)
+ if err != nil {
+ return err
+ }
+ _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, pubFromProto(&prevPub))
+ } else {
+ _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, nil)
+ }
} else if pushType == joinPushType {
var info protocol.ClientInfo
err := info.UnmarshalVT(pushData)
@@ -1199,7 +1214,7 @@ func (b *RedisBroker) historyList(s *RedisShard, ch string, filter HistoryFilter
return nil, StreamPosition{}, errors.New("error getting value")
}
- pushData, _, sp, ok := extractPushData(convert.StringToBytes(value))
+ pushData, _, sp, _, _, ok := extractPushData(convert.StringToBytes(value))
if !ok {
return nil, StreamPosition{}, fmt.Errorf("malformed publication value: %s", value)
}
@@ -1281,45 +1296,149 @@ var (
)
// See tests for supported format examples.
-func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool) {
+func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool, []byte, bool) {
var offset uint64
var epoch string
if !bytes.HasPrefix(data, metaSep) {
- return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, true
+ return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, true
}
- nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep)
- if nextMetaSepPos <= 0 {
- return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false
+
+ content := data[len(metaSep):]
+ if len(content) == 0 {
+ return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false
}
- content := data[len(metaSep) : len(metaSep)+nextMetaSepPos]
- contentType := content[0]
- rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):]
+ contentType := content[0]
switch contentType {
case 'j':
- return rest, joinPushType, StreamPosition{}, true
+ // __j__payload.
+ nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep)
+ if nextMetaSepPos <= 0 {
+ return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false
+ }
+ rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):]
+ return rest, joinPushType, StreamPosition{}, false, nil, true
case 'l':
- return rest, leavePushType, StreamPosition{}, true
- }
+ // __l__payload.
+ nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep)
+ if nextMetaSepPos <= 0 {
+ return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false
+ }
+ rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):]
+ return rest, leavePushType, StreamPosition{}, false, nil, true
+ case 'p':
+ // p1:offset:epoch__payload
+ nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep)
+ if nextMetaSepPos <= 0 {
+ return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false
+ }
+ header := data[len(metaSep) : len(metaSep)+nextMetaSepPos]
+ stringHeader := convert.BytesToString(header)
- stringContent := convert.BytesToString(content)
+ rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):]
- if contentType == 'p' {
- // new format p1:offset:epoch
- stringContent = stringContent[3:] // offset:epoch
- epochDelimiterPos := strings.Index(stringContent, contentSep)
+ stringHeader = stringHeader[3:] // offset:epoch
+ epochDelimiterPos := strings.Index(stringHeader, contentSep)
if epochDelimiterPos <= 0 {
- return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false
+ return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false
}
var err error
- offset, err = strconv.ParseUint(stringContent[:epochDelimiterPos], 10, 64)
- epoch = stringContent[epochDelimiterPos+1:]
- return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, err == nil
+ offset, err = strconv.ParseUint(stringHeader[:epochDelimiterPos], 10, 64)
+ epoch = stringHeader[epochDelimiterPos+1:]
+ return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, err == nil
+ case 'd':
+ // d1:offset:epoch:prev_payload_length:prev_payload:payload_length:payload
+ stringContent := convert.BytesToString(content)
+ parsedDelta, err := parseDeltaPush(stringContent)
+ return convert.StringToBytes(parsedDelta.Payload), pubPushType, StreamPosition{Epoch: parsedDelta.Epoch, Offset: parsedDelta.Offset}, true, convert.StringToBytes(parsedDelta.PrevPayload), err == nil
+ default:
+ // Unknown content type.
+ return nil, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false
}
+}
- // old format with offset only: __offset__
- var err error
- offset, err = strconv.ParseUint(stringContent, 10, 64)
- return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, err == nil
+type deltaPublicationPush struct {
+ Offset uint64
+ Epoch string
+ PrevPayloadLength int
+ PrevPayload string
+ PayloadLength int
+ Payload string
+}
+
+func parseDeltaPush(input string) (*deltaPublicationPush, error) {
+ // d1:offset:epoch:prev_payload_length:prev_payload:payload_length:payload
+ const prefix = "d1:"
+ if !strings.HasPrefix(input, prefix) {
+ return nil, fmt.Errorf("input does not start with the expected prefix")
+ }
+ input = input[len(prefix):] // Remove prefix
+
+ // offset:epoch:prev_payload_length:prev_payload:payload_length:payload
+
+ idx := strings.IndexByte(input, ':')
+ if idx == -1 {
+ return nil, fmt.Errorf("invalid format, missing offset")
+ }
+ offset, err := strconv.ParseUint(input[:idx], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing offset: %v", err)
+ }
+ input = input[idx+1:]
+
+ // epoch:prev_payload_length:prev_payload:payload_length:payload
+
+ idx = strings.IndexByte(input, ':')
+ if idx == -1 {
+ return nil, fmt.Errorf("invalid format, missing epoch")
+ }
+ epoch := input[:idx]
+ input = input[idx+1:]
+
+ // prev_payload_length:prev_payload:payload_length:payload
+
+ idx = strings.IndexByte(input, ':')
+ if idx == -1 {
+ return nil, fmt.Errorf("invalid format, missing prev payload length")
+ }
+ prevPayloadLength, err := strconv.Atoi(input[:idx])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing prev payload length: %v", err)
+ }
+
+ input = input[idx+1:]
+
+ // Extract prev_payload based on prev_payload_length
+ if len(input) < prevPayloadLength {
+ return nil, fmt.Errorf("input is shorter than expected prev payload length")
+ }
+ prevPayload := input[:prevPayloadLength]
+ input = input[prevPayloadLength+1:]
+
+ // payload_length:payload
+ idx = strings.IndexByte(input, ':')
+ if idx == -1 {
+ return nil, fmt.Errorf("invalid format, missing payload")
+ }
+ payloadLength, err := strconv.Atoi(input[:idx])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing payload_length: %v", err)
+ }
+ input = input[idx+1:]
+
+ // Extract payload based on payload_length
+ if len(input) < payloadLength {
+ return nil, fmt.Errorf("input is shorter than expected payload length")
+ }
+ payload := input[:payloadLength]
+
+ return &deltaPublicationPush{
+ Offset: offset,
+ Epoch: epoch,
+ PrevPayloadLength: prevPayloadLength,
+ PrevPayload: prevPayload,
+ PayloadLength: payloadLength,
+ Payload: payload,
+ }, nil
}
diff --git a/broker_redis_test.go b/broker_redis_test.go
index f805c6e7..5e82d3bf 100644
--- a/broker_redis_test.go
+++ b/broker_redis_test.go
@@ -16,6 +16,7 @@ import (
"time"
"github.com/centrifugal/protocol"
+ "github.com/google/uuid"
"github.com/stretchr/testify/require"
)
@@ -684,7 +685,7 @@ func TestRedisBrokerHandlePubSubMessage(t *testing.T) {
b := NewTestRedisBroker(t, node, getUniquePrefix(), false)
defer func() { _ = node.Shutdown(context.Background()) }()
defer stopRedisBroker(b)
- err := b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ err := b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
require.Equal(t, "test", ch)
require.Equal(t, uint64(16901), sp.Offset)
require.Equal(t, "xyz", sp.Epoch)
@@ -692,7 +693,7 @@ func TestRedisBrokerHandlePubSubMessage(t *testing.T) {
}}, b.messageChannelID(b.shards[0].shard, "test"), []byte("__p1:16901:xyz__dsdsd"))
require.Error(t, err)
- err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
return nil
}}, b.messageChannelID(b.shards[0].shard, "test"), []byte("__p1:16901"))
require.Error(t, err)
@@ -703,7 +704,7 @@ func TestRedisBrokerHandlePubSubMessage(t *testing.T) {
data, err := pub.MarshalVT()
require.NoError(t, err)
var publicationHandlerCalled bool
- err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
publicationHandlerCalled = true
require.Equal(t, "test", ch)
require.Equal(t, uint64(16901), sp.Offset)
@@ -744,7 +745,7 @@ func BenchmarkRedisExtractPushData(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _, _, sp, ok := extractPushData(data)
+ _, _, sp, _, _, ok := extractPushData(data)
if !ok {
b.Fatal("wrong data")
}
@@ -759,55 +760,40 @@ func BenchmarkRedisExtractPushData(b *testing.B) {
func TestRedisExtractPushData(t *testing.T) {
data := []byte(`__p1:16901:xyz.123__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- pushData, pushType, sp, ok := extractPushData(data)
+ pushData, pushType, sp, _, _, ok := extractPushData(data)
require.True(t, ok)
require.Equal(t, pubPushType, pushType)
require.Equal(t, uint64(16901), sp.Offset)
require.Equal(t, "xyz.123", sp.Epoch)
require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData)
- data = []byte(`__16901__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- pushData, pushType, sp, ok = extractPushData(data)
- require.True(t, ok)
- require.Equal(t, pubPushType, pushType)
- require.Equal(t, uint64(16901), sp.Offset)
- require.Equal(t, "", sp.Epoch)
- require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData)
-
data = []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- pushData, pushType, sp, ok = extractPushData(data)
+ pushData, pushType, sp, _, _, ok = extractPushData(data)
require.True(t, ok)
require.Equal(t, pubPushType, pushType)
require.Equal(t, uint64(0), sp.Offset)
require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData)
- data = []byte(`__4294967337__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- pushData, pushType, sp, ok = extractPushData(data)
- require.True(t, ok)
- require.Equal(t, pubPushType, pushType)
- require.Equal(t, uint64(4294967337), sp.Offset)
- require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData)
-
data = []byte(`__j__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- pushData, pushType, sp, ok = extractPushData(data)
+ pushData, pushType, sp, _, _, ok = extractPushData(data)
require.True(t, ok)
require.Equal(t, joinPushType, pushType)
require.Equal(t, uint64(0), sp.Offset)
require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData)
data = []byte(`__l__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- pushData, pushType, sp, ok = extractPushData(data)
+ pushData, pushType, sp, _, _, ok = extractPushData(data)
require.True(t, ok)
require.Equal(t, leavePushType, pushType)
require.Equal(t, uint64(0), sp.Offset)
require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData)
data = []byte(`____\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- _, _, _, ok = extractPushData(data)
+ _, _, _, _, _, ok = extractPushData(data)
require.False(t, ok)
data = []byte(`__a__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`)
- _, _, _, ok = extractPushData(data)
+ _, _, _, _, _, ok = extractPushData(data)
require.False(t, ok)
}
@@ -973,7 +959,7 @@ func TestRedisPubSubTwoNodes(t *testing.T) {
HandleControlFunc: func(bytes []byte) error {
return nil
},
- HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
c := atomic.AddInt64(&numPublications, 1)
if c == int64(msgNum) {
close(pubCh)
@@ -1040,6 +1026,96 @@ func TestRedisPubSubTwoNodes(t *testing.T) {
}
}
+type testDeltaPublishHandle struct {
+ ch string
+ pub *Publication
+ sp StreamPosition
+ prevPub *Publication
+}
+
+func TestRedisPubSubTwoNodesWithDelta(t *testing.T) {
+ redisConf := testRedisConf()
+
+ prefix := getUniquePrefix()
+
+ ch := "test" + uuid.NewString()
+
+ node1, _ := New(Config{})
+ s, err := NewRedisShard(node1, redisConf)
+ require.NoError(t, err)
+ b1, _ := NewRedisBroker(node1, RedisBrokerConfig{
+ Prefix: prefix,
+ Shards: []*RedisShard{s},
+ numPubSubSubscribers: 4,
+ numPubSubProcessors: 2,
+ })
+ node1.SetBroker(b1)
+ defer func() { _ = node1.Shutdown(context.Background()) }()
+ defer stopRedisBroker(b1)
+
+ msgNum := 2
+ var numPublications int64
+ pubCh := make(chan struct{})
+ var resultsMu sync.Mutex
+ results := make([]testDeltaPublishHandle, 0, msgNum)
+
+ brokerEventHandler := &testBrokerEventHandler{
+ HandleControlFunc: func(bytes []byte) error {
+ return nil
+ },
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
+ resultsMu.Lock()
+ defer resultsMu.Unlock()
+ results = append(results, testDeltaPublishHandle{
+ ch: ch,
+ pub: pub,
+ sp: sp,
+ prevPub: prevPub,
+ })
+ c := atomic.AddInt64(&numPublications, 1)
+ if c == int64(msgNum) {
+ close(pubCh)
+ }
+ return nil
+ },
+ }
+ _ = b1.Run(brokerEventHandler)
+
+ require.NoError(t, b1.Subscribe(ch))
+
+ node2, _ := New(Config{})
+ s2, err := NewRedisShard(node2, redisConf)
+ require.NoError(t, err)
+
+ b2, _ := NewRedisBroker(node2, RedisBrokerConfig{
+ Prefix: prefix,
+ Shards: []*RedisShard{s2},
+ })
+ node2.SetBroker(b2)
+ _ = node2.Run()
+ defer func() { _ = node2.Shutdown(context.Background()) }()
+ defer stopRedisBroker(b2)
+
+ for i := 0; i < msgNum; i++ {
+ sp, err := node2.Publish(ch, []byte("123"),
+ WithHistory(1, time.Minute), WithDelta(true))
+ require.NoError(t, err)
+ require.Equal(t, sp.Offset, uint64(i+1))
+ }
+
+ select {
+ case <-pubCh:
+ case <-time.After(time.Second):
+ require.Fail(t, "timeout waiting for PUB/SUB message")
+ }
+
+ resultsMu.Lock()
+ defer resultsMu.Unlock()
+ require.Len(t, results, msgNum)
+ require.Nil(t, results[0].prevPub)
+ require.NotNil(t, results[1].prevPub)
+}
+
func TestRedisClusterShardedPubSub(t *testing.T) {
redisConf := RedisShardConfig{
ClusterAddresses: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
@@ -1080,7 +1156,7 @@ func TestRedisClusterShardedPubSub(t *testing.T) {
HandleControlFunc: func(bytes []byte) error {
return nil
},
- HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
c := atomic.AddInt64(&numPublications, 1)
if c == int64(msgNum) {
close(pubCh)
@@ -1536,22 +1612,22 @@ func testRedisClientSubscribeRecover(t *testing.T, tt recoverTest, useStreams bo
historyResult, err := node.recoverHistory(channel, StreamPosition{tt.SinceOffset, streamTop.Epoch}, 0)
require.NoError(t, err)
- recoveredPubs, recovered := isRecovered(historyResult, tt.SinceOffset, streamTop.Epoch)
+ recoveredPubs, recovered := isStreamRecovered(historyResult, tt.SinceOffset, streamTop.Epoch)
require.Equal(t, tt.NumRecovered, len(recoveredPubs))
require.Equal(t, tt.Recovered, recovered)
}
var brokerRecoverTests = []recoverTest{
- {"empty_stream", 10, 60, 0, 0, 0, 0, 0, true},
- {"from_position", 10, 60, 10, 8, 2, 0, 0, true},
- {"from_position_limited", 10, 60, 10, 5, 2, 0, 2, false},
- {"from_position_with_server_limit", 10, 60, 10, 5, 1, 0, 1, false},
- {"from_position_that_already_gone", 10, 60, 20, 8, 10, 0, 0, false},
- {"from_position_that_not_exist_yet", 10, 60, 20, 108, 0, 0, 0, false},
- {"same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true},
- {"empty_position_recover_expected", 10, 60, 4, 0, 4, 0, 0, true},
- {"from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false},
- {"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true},
+ {"empty_stream", 10, 60, 0, 0, 0, 0, 0, true, RecoveryModeStream},
+ {"from_position", 10, 60, 10, 8, 2, 0, 0, true, RecoveryModeStream},
+ {"from_position_limited", 10, 60, 10, 5, 2, 0, 2, false, RecoveryModeStream},
+ {"from_position_with_server_limit", 10, 60, 10, 5, 1, 0, 1, false, RecoveryModeStream},
+ {"from_position_that_already_gone", 10, 60, 20, 8, 10, 0, 0, false, RecoveryModeStream},
+ {"from_position_that_not_exist_yet", 10, 60, 20, 108, 0, 0, 0, false, RecoveryModeStream},
+ {"same_position_no_pubs_expected", 10, 60, 7, 7, 0, 0, 0, true, RecoveryModeStream},
+ {"empty_position_recover_expected", 10, 60, 4, 0, 4, 0, 0, true, RecoveryModeStream},
+ {"from_position_in_expired_stream", 10, 1, 10, 8, 0, 3, 0, false, RecoveryModeStream},
+ {"from_same_position_in_expired_stream", 10, 1, 1, 1, 0, 3, 0, true, RecoveryModeStream},
}
func TestRedisClientSubscribeRecoverStreams(t *testing.T) {
@@ -1708,7 +1784,7 @@ func BenchmarkPubSubThroughput(b *testing.B) {
HandleControlFunc: func(bytes []byte) error {
return nil
},
- HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
pubCh <- struct{}{}
return nil
},
@@ -1883,3 +1959,76 @@ func TestPreShardedSlots(t *testing.T) {
})
}
}
+
+func TestParseDeltaPush(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expectError bool
+ expectedResult *deltaPublicationPush
+ }{
+ {
+ name: "valid data with colon in payload",
+ input: "d1:1234567890:epoch1:4:test:18:payload:with:colon",
+ expectError: false,
+ expectedResult: &deltaPublicationPush{
+ Offset: 1234567890,
+ Epoch: "epoch1",
+ PrevPayloadLength: 4,
+ PrevPayload: "test",
+ PayloadLength: 18,
+ Payload: "payload:with:colon",
+ },
+ },
+ {
+ name: "valid data with empty payload",
+ input: "d1:1234567890:epoch2:0::0:",
+ expectError: false,
+ expectedResult: &deltaPublicationPush{
+ Offset: 1234567890,
+ Epoch: "epoch2",
+ PrevPayloadLength: 0,
+ PrevPayload: "",
+ PayloadLength: 0,
+ Payload: "",
+ },
+ },
+ {
+ name: "invalid format - missing parts",
+ input: "d1:123456:epoch3",
+ expectError: true,
+ },
+ {
+ name: "invalid offset",
+ input: "d1:notanumber:epoch4:4:test:5:hello",
+ expectError: true,
+ },
+ {
+ name: "invalid prev payload length",
+ input: "d1:12:epoch4:invalid:test:5:hello",
+ expectError: true,
+ },
+ {
+ name: "invalid payload length",
+ input: "d1:12:epoch4:4:test:invalid:hello",
+ expectError: true,
+ },
+ {
+ name: "invalid format no payload",
+ input: "d1:12:epoch4:4:test:5:",
+ expectError: true,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ result, err := parseDeltaPush(tc.input)
+ if tc.expectError {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.expectedResult, result)
+ }
+ })
+ }
+}
diff --git a/channel_medium.go b/channel_medium.go
new file mode 100644
index 00000000..5ceeee9a
--- /dev/null
+++ b/channel_medium.go
@@ -0,0 +1,446 @@
+package centrifuge
+
+import (
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ "github.com/centrifugal/centrifuge/internal/timers"
+)
+
+// ChannelMediumOptions is an EXPERIMENTAL way to enable using a channel medium layer in Centrifuge.
+// Note, channel medium layer is very unstable at the moment – do not use it in production!
+// Channel medium layer is an optional per-channel intermediary between Broker PUB/SUB and Client
+// connections. This intermediary layer may be used for various per-channel tweaks and optimizations.
+// Channel medium comes with memory overhead depending on ChannelMediumOptions. At the same time, it
+// can provide significant benefits in terms of overall system efficiency and flexibility.
+type ChannelMediumOptions struct {
+ // KeepLatestPublication enables keeping latest publication which was broadcasted to channel subscribers on
+ // this Node in the channel medium layer. This is helpful for supporting deltas in at most once scenario.
+ KeepLatestPublication bool
+
+ // EnablePositionSync when true delegates connection position checks to the channel medium. In that case
+ // check is only performed no more often than once in Config.ClientChannelPositionCheckDelay thus reducing
+ // the load on broker in cases when channel has many subscribers. When message loss is detected medium layer
+ // tells caller about this and also marks all channel subscribers with insufficient state flag. By default,
+ // medium is not used for sync – in that case each individual connection syncs position independently.
+ EnablePositionSync bool
+
+ // enableQueue enables a queue for incoming publications. This can be useful to reduce PUB/SUB message processing time
+ // (as we put it into a single medium layer queue instead of each individual connection queue), reduce
+ // channel broadcast contention (when one channel waits for broadcast of another channel to finish),
+ // and also opens a road for broadcast tweaks – such as BroadcastDelay and delta between several
+ // publications (deltas require both BroadcastDelay and KeepLatestPublication to be enabled). This costs
+ // additional goroutine.
+ enableQueue bool
+ // queueMaxSize is a maximum size of the queue used in channel medium (in bytes). If zero, 16MB default
+ // is used. If max size reached, new publications will be dropped.
+ queueMaxSize int
+
+ // broadcastDelay controls the delay before Publication broadcast. On time tick Centrifuge broadcasts
+ // only the latest publication in the channel if any. Useful to reduce/smooth the number of messages sent
+ // to clients when publication contains the entire state. If zero, all publications will be sent to clients
+ // without delay logic involved on channel medium level. broadcastDelay option requires (!) enableQueue to be
+ // enabled, as we can not afford delays during broadcast from the PUB/SUB layer. broadcastDelay must not be
+ // used in channels with positioning/recovery on since it skips publications.
+ broadcastDelay time.Duration
+}
+
+// Keep global to save 8 byte per-channel. Must be only changed by tests.
+var channelMediumTimeNow = time.Now
+
+// channelMedium is initialized when first subscriber comes into channel, and dropped as soon as last
+// subscriber leaves the channel on the Node.
+type channelMedium struct {
+ channel string
+ node node
+ options ChannelMediumOptions
+
+ mu sync.RWMutex
+ closeCh chan struct{}
+ // optional queue for publications.
+ messages *publicationQueue
+ // We must synchronize broadcast method between general publications and insufficient state notifications.
+ // Only used when queue is disabled.
+ broadcastMu sync.Mutex
+ // latestPublication is a publication last sent to connections on this Node.
+ latestPublication *Publication
+ // positionCheckTime is a time (Unix Nanoseconds) when last position check was performed.
+ positionCheckTime int64
+}
+
+type node interface {
+ handlePublication(ch string, sp StreamPosition, pub, prevPub *Publication, memPrevPub *Publication) error
+ streamTop(ch string, historyMetaTTL time.Duration) (StreamPosition, error)
+}
+
+func newChannelMedium(channel string, node node, options ChannelMediumOptions) (*channelMedium, error) {
+ if options.broadcastDelay > 0 && !options.enableQueue {
+ return nil, errors.New("broadcast delay can only be used with queue enabled")
+ }
+ c := &channelMedium{
+ channel: channel,
+ node: node,
+ options: options,
+ closeCh: make(chan struct{}),
+ positionCheckTime: channelMediumTimeNow().UnixNano(),
+ }
+ if options.enableQueue {
+ c.messages = newPublicationQueue(2)
+ go c.writer()
+ }
+ return c, nil
+}
+
+type queuedPub struct {
+ pub *Publication
+ sp StreamPosition
+ prevPub *Publication
+ isInsufficientState bool
+}
+
+const defaultChannelLayerQueueMaxSize = 16 * 1024 * 1024
+
+func (c *channelMedium) broadcastPublication(pub *Publication, sp StreamPosition, prevPub *Publication) {
+ bp := queuedPub{pub: pub, sp: sp, prevPub: prevPub}
+ c.mu.Lock()
+ c.positionCheckTime = channelMediumTimeNow().UnixNano()
+ c.mu.Unlock()
+
+ if c.options.enableQueue {
+ queueMaxSize := defaultChannelLayerQueueMaxSize
+ if c.options.queueMaxSize > 0 {
+ queueMaxSize = c.options.queueMaxSize
+ }
+ if c.messages.Size() > queueMaxSize {
+ return
+ }
+ c.messages.Add(queuedPublication{Publication: bp})
+ } else {
+ c.broadcastMu.Lock()
+ defer c.broadcastMu.Unlock()
+ c.broadcast(bp)
+ }
+}
+
+func (c *channelMedium) broadcastInsufficientState() {
+ bp := queuedPub{prevPub: nil, isInsufficientState: true}
+ c.mu.Lock()
+ c.positionCheckTime = channelMediumTimeNow().UnixNano()
+ c.mu.Unlock()
+ if c.options.enableQueue {
+ // TODO: possibly support c.messages.dropQueued() for this path ?
+ c.messages.Add(queuedPublication{Publication: bp})
+ } else {
+ c.broadcastMu.Lock()
+ defer c.broadcastMu.Unlock()
+ c.broadcast(bp)
+ }
+}
+
+func (c *channelMedium) broadcast(qp queuedPub) {
+ pubToBroadcast := qp.pub
+ spToBroadcast := qp.sp
+ if qp.isInsufficientState {
+ // using math.MaxUint64 as a special offset to trigger insufficient state.
+ pubToBroadcast = &Publication{Offset: math.MaxUint64}
+ spToBroadcast.Offset = math.MaxUint64
+ }
+
+ prevPub := qp.prevPub
+ var localPrevPub *Publication
+ useLocalLatestPub := c.options.KeepLatestPublication && !qp.isInsufficientState
+ if useLocalLatestPub {
+ localPrevPub = c.latestPublication
+ }
+ if c.options.broadcastDelay > 0 && !c.options.KeepLatestPublication {
+ prevPub = nil
+ }
+ if qp.isInsufficientState {
+ prevPub = nil
+ }
+ _ = c.node.handlePublication(c.channel, spToBroadcast, pubToBroadcast, prevPub, localPrevPub)
+ if useLocalLatestPub {
+ c.latestPublication = qp.pub
+ }
+}
+
+func (c *channelMedium) writer() {
+ for {
+ if ok := c.waitSendPub(c.options.broadcastDelay); !ok {
+ return
+ }
+ }
+}
+
+func (c *channelMedium) waitSendPub(delay time.Duration) bool {
+ // Wait for message from the queue.
+ ok := c.messages.Wait()
+ if !ok {
+ return false
+ }
+
+ if delay > 0 {
+ tm := timers.AcquireTimer(delay)
+ select {
+ case <-tm.C:
+ case <-c.closeCh:
+ timers.ReleaseTimer(tm)
+ return false
+ }
+ timers.ReleaseTimer(tm)
+ }
+
+ msg, ok := c.messages.Remove()
+ if !ok {
+ return !c.messages.Closed()
+ }
+ if delay == 0 || msg.Publication.isInsufficientState {
+ c.broadcast(msg.Publication)
+ return true
+ }
+ messageCount := c.messages.Len()
+ for messageCount > 0 {
+ messageCount--
+ var ok bool
+ msg, ok = c.messages.Remove()
+ if !ok {
+ if c.messages.Closed() {
+ return false
+ }
+ break
+ }
+ if msg.Publication.isInsufficientState {
+ break
+ }
+ }
+ c.broadcast(msg.Publication)
+ return true
+}
+
+func (c *channelMedium) CheckPosition(historyMetaTTL time.Duration, clientPosition StreamPosition, checkDelay time.Duration) bool {
+ nowUnixNano := channelMediumTimeNow().UnixNano()
+ c.mu.Lock()
+ needCheckPosition := nowUnixNano-c.positionCheckTime >= checkDelay.Nanoseconds()
+ if needCheckPosition {
+ c.positionCheckTime = nowUnixNano
+ }
+ c.mu.Unlock()
+ if !needCheckPosition {
+ return true
+ }
+ _, validPosition, err := c.checkPositionWithRetry(historyMetaTTL, clientPosition)
+ if err != nil {
+ // Position will be checked again later.
+ return true
+ }
+ if !validPosition {
+ c.broadcastInsufficientState()
+ }
+ return validPosition
+}
+
+func (c *channelMedium) checkPositionWithRetry(historyMetaTTL time.Duration, clientPosition StreamPosition) (StreamPosition, bool, error) {
+ sp, validPosition, err := c.checkPositionOnce(historyMetaTTL, clientPosition)
+ if err != nil || !validPosition {
+ return c.checkPositionOnce(historyMetaTTL, clientPosition)
+ }
+ return sp, validPosition, err
+}
+
+func (c *channelMedium) checkPositionOnce(historyMetaTTL time.Duration, clientPosition StreamPosition) (StreamPosition, bool, error) {
+ streamTop, err := c.node.streamTop(c.channel, historyMetaTTL)
+ if err != nil {
+ return StreamPosition{}, false, err
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ isValidPosition := streamTop.Epoch == clientPosition.Epoch && clientPosition.Offset == streamTop.Offset
+ return streamTop, isValidPosition, nil
+}
+
+func (c *channelMedium) close() {
+ close(c.closeCh)
+}
+
+type queuedPublication struct {
+ Publication queuedPub
+}
+
+// publicationQueue is an unbounded queue of queuedPublication.
+// The queue is goroutine safe.
+// Inspired by http://blog.dubbelboer.com/2015/04/25/go-faster-queue.html (MIT)
+type publicationQueue struct {
+ mu sync.RWMutex
+ cond *sync.Cond
+ nodes []queuedPublication
+ head int
+ tail int
+ cnt int
+ size int
+ closed bool
+ initCap int
+}
+
+// newPublicationQueue returns a new queuedPublication queue with initial capacity.
+func newPublicationQueue(initialCapacity int) *publicationQueue {
+ sq := &publicationQueue{
+ initCap: initialCapacity,
+ nodes: make([]queuedPublication, initialCapacity),
+ }
+ sq.cond = sync.NewCond(&sq.mu)
+ return sq
+}
+
+// Mutex must be held when calling.
+func (q *publicationQueue) resize(n int) {
+ nodes := make([]queuedPublication, n)
+ if q.head < q.tail {
+ copy(nodes, q.nodes[q.head:q.tail])
+ } else {
+ copy(nodes, q.nodes[q.head:])
+ copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.tail])
+ }
+
+ q.tail = q.cnt % n
+ q.head = 0
+ q.nodes = nodes
+}
+
+ // Add a queuedPublication to the back of the queue
+// will return false if the queue is closed.
+// In that case the queuedPublication is dropped.
+func (q *publicationQueue) Add(i queuedPublication) bool {
+ q.mu.Lock()
+ if q.closed {
+ q.mu.Unlock()
+ return false
+ }
+ if q.cnt == len(q.nodes) {
+ // Also tested a growth rate of 1.5, see: http://stackoverflow.com/questions/2269063/buffer-growth-strategy
+ // In Go this resulted in a higher memory usage.
+ q.resize(q.cnt * 2)
+ }
+ q.nodes[q.tail] = i
+ q.tail = (q.tail + 1) % len(q.nodes)
+ if i.Publication.pub != nil {
+ q.size += len(i.Publication.pub.Data)
+ }
+ q.cnt++
+ q.cond.Signal()
+ q.mu.Unlock()
+ return true
+}
+
+// Close the queue and discard all entries in the queue
+// all goroutines in wait() will return
+func (q *publicationQueue) Close() {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ q.closed = true
+ q.cnt = 0
+ q.nodes = nil
+ q.size = 0
+ q.cond.Broadcast()
+}
+
+// CloseRemaining will close the queue and return all entries in the queue.
+// All goroutines in wait() will return.
+func (q *publicationQueue) CloseRemaining() []queuedPublication {
+ q.mu.Lock()
+ defer q.mu.Unlock()
+ if q.closed {
+ return []queuedPublication{}
+ }
+ rem := make([]queuedPublication, 0, q.cnt)
+ for q.cnt > 0 {
+ i := q.nodes[q.head]
+ q.head = (q.head + 1) % len(q.nodes)
+ q.cnt--
+ rem = append(rem, i)
+ }
+ q.closed = true
+ q.cnt = 0
+ q.nodes = nil
+ q.size = 0
+ q.cond.Broadcast()
+ return rem
+}
+
+// Closed returns true if the queue has been closed
+// The call cannot guarantee that the queue hasn't been
+// closed while the function returns, so only "true" has a definite meaning.
+func (q *publicationQueue) Closed() bool {
+ q.mu.RLock()
+ c := q.closed
+ q.mu.RUnlock()
+ return c
+}
+
+// Wait for a message to be added.
+// If there are items on the queue will return immediately.
+// Will return false if the queue is closed.
+// Otherwise, returns true.
+func (q *publicationQueue) Wait() bool {
+ q.mu.Lock()
+ if q.closed {
+ q.mu.Unlock()
+ return false
+ }
+ if q.cnt != 0 {
+ q.mu.Unlock()
+ return true
+ }
+ q.cond.Wait()
+ q.mu.Unlock()
+ return true
+}
+
+ // Remove will remove a queuedPublication from the queue.
+// If false is returned, it either means 1) there were no items on the queue
+// or 2) the queue is closed.
+func (q *publicationQueue) Remove() (queuedPublication, bool) {
+ q.mu.Lock()
+ if q.cnt == 0 {
+ q.mu.Unlock()
+ return queuedPublication{}, false
+ }
+ i := q.nodes[q.head]
+ q.head = (q.head + 1) % len(q.nodes)
+ q.cnt--
+ if i.Publication.pub != nil {
+ q.size -= len(i.Publication.pub.Data)
+ }
+
+ if n := len(q.nodes) / 2; n >= q.initCap && q.cnt <= n {
+ q.resize(n)
+ }
+
+ q.mu.Unlock()
+ return i, true
+}
+
+// Cap returns the capacity (without allocations)
+func (q *publicationQueue) Cap() int {
+ q.mu.RLock()
+ c := cap(q.nodes)
+ q.mu.RUnlock()
+ return c
+}
+
+// Len returns the current length of the queue.
+func (q *publicationQueue) Len() int {
+ q.mu.RLock()
+ l := q.cnt
+ q.mu.RUnlock()
+ return l
+}
+
+// Size returns the current size of the queue.
+func (q *publicationQueue) Size() int {
+ q.mu.RLock()
+ s := q.size
+ q.mu.RUnlock()
+ return s
+}
diff --git a/channel_medium_test.go b/channel_medium_test.go
new file mode 100644
index 00000000..b1e9be86
--- /dev/null
+++ b/channel_medium_test.go
@@ -0,0 +1,173 @@
+package centrifuge
+
+import (
+ "errors"
+ "math"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Helper function to create a channelMedium with options.
+func setupChannelMedium(t testing.TB, options ChannelMediumOptions, node node) *channelMedium {
+ t.Helper()
+ channel := "testChannel"
+ cache, err := newChannelMedium(channel, node, options)
+ if err != nil {
+ require.NoError(t, err)
+ }
+ return cache
+}
+
+type mockNode struct {
+ handlePublicationFunc func(channel string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error
+ streamTopFunc func(ch string, historyMetaTTL time.Duration) (StreamPosition, error)
+}
+
+func (m *mockNode) handlePublication(channel string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error {
+ if m.handlePublicationFunc != nil {
+ return m.handlePublicationFunc(channel, sp, pub, prevPub, localPrevPub)
+ }
+ return nil
+}
+
+func (m *mockNode) streamTop(ch string, historyMetaTTL time.Duration) (StreamPosition, error) {
+ if m.streamTopFunc != nil {
+ return m.streamTopFunc(ch, historyMetaTTL)
+ }
+ return StreamPosition{}, nil
+}
+
+func TestChannelMediumHandlePublication(t *testing.T) {
+ optionSet := []ChannelMediumOptions{
+ {
+ enableQueue: false,
+ KeepLatestPublication: false,
+ },
+ {
+ enableQueue: true,
+ KeepLatestPublication: false,
+ },
+ {
+ enableQueue: true,
+ KeepLatestPublication: false,
+ broadcastDelay: 10 * time.Millisecond,
+ },
+ {
+ enableQueue: true,
+ KeepLatestPublication: true,
+ broadcastDelay: 10 * time.Millisecond,
+ },
+ }
+
+ for i, options := range optionSet {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ doneCh := make(chan struct{})
+
+ cache := setupChannelMedium(t, options, &mockNode{
+ handlePublicationFunc: func(channel string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error {
+ close(doneCh)
+ return nil
+ },
+ })
+
+ pub := &Publication{Data: []byte("test data")}
+ sp := StreamPosition{Offset: 1}
+
+ cache.broadcastPublication(pub, sp, nil)
+
+ select {
+ case <-doneCh:
+ case <-time.After(5 * time.Second):
+ require.Fail(t, "handlePublicationFunc was not called")
+ }
+ })
+ }
+}
+
+func TestChannelMediumInsufficientState(t *testing.T) {
+ options := ChannelMediumOptions{
+ enableQueue: true,
+ KeepLatestPublication: true,
+ }
+ doneCh := make(chan struct{})
+ medium := setupChannelMedium(t, options, &mockNode{
+ handlePublicationFunc: func(channel string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error {
+ require.Equal(t, uint64(math.MaxUint64), pub.Offset)
+ require.Equal(t, uint64(math.MaxUint64), sp.Offset)
+ close(doneCh)
+ return nil
+ },
+ })
+
+ // Simulate the behavior when the state is marked as insufficient
+ medium.broadcastInsufficientState()
+
+ select {
+ case <-doneCh:
+ case <-time.After(5 * time.Second):
+ require.Fail(t, "handlePublicationFunc was not called")
+ }
+}
+
+func TestChannelMediumPositionSync(t *testing.T) {
+ options := ChannelMediumOptions{
+ EnablePositionSync: true,
+ }
+ doneCh := make(chan struct{})
+ var closeOnce sync.Once
+ medium := setupChannelMedium(t, options, &mockNode{
+ streamTopFunc: func(ch string, historyMetaTTL time.Duration) (StreamPosition, error) {
+ closeOnce.Do(func() {
+ close(doneCh)
+ })
+ return StreamPosition{}, nil
+ },
+ })
+ originalGetter := channelMediumTimeNow
+ channelMediumTimeNow = func() time.Time {
+ return time.Now().Add(time.Hour)
+ }
+ medium.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second)
+ channelMediumTimeNow = originalGetter
+ select {
+ case <-doneCh:
+ case <-time.After(5 * time.Second):
+ require.Fail(t, "streamTopFunc was not called")
+ }
+}
+
+func TestChannelMediumPositionSyncRetry(t *testing.T) {
+ options := ChannelMediumOptions{
+ EnablePositionSync: true,
+ }
+ doneCh := make(chan struct{})
+ var closeOnce sync.Once
+ numCalls := 0
+ medium := setupChannelMedium(t, options, &mockNode{
+ streamTopFunc: func(ch string, historyMetaTTL time.Duration) (StreamPosition, error) {
+ if numCalls == 0 {
+ numCalls++
+ return StreamPosition{}, errors.New("boom")
+ }
+ closeOnce.Do(func() {
+ close(doneCh)
+ })
+ return StreamPosition{}, nil
+ },
+ })
+ originalGetter := channelMediumTimeNow
+ channelMediumTimeNow = func() time.Time {
+ return time.Now().Add(time.Hour)
+ }
+ medium.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second)
+ channelMediumTimeNow = originalGetter
+ select {
+ case <-doneCh:
+ case <-time.After(5 * time.Second):
+ require.Fail(t, "streamTopFunc was not called")
+ }
+}
diff --git a/client.go b/client.go
index 954fb277..f1a640f5 100644
--- a/client.go
+++ b/client.go
@@ -5,9 +5,12 @@ import (
"errors"
"fmt"
"io"
+ "math"
+ "slices"
"sync"
"time"
+ "github.com/centrifugal/centrifuge/internal/convert"
"github.com/centrifugal/centrifuge/internal/queue"
"github.com/centrifugal/centrifuge/internal/recovery"
"github.com/centrifugal/centrifuge/internal/saferand"
@@ -15,6 +18,7 @@ import (
"github.com/centrifugal/protocol"
"github.com/google/uuid"
"github.com/segmentio/encoding/json"
+ fdelta "github.com/shadowspore/fossil-delta"
)
// Empty Replies/Pushes for pings.
@@ -136,6 +140,7 @@ const (
flagPositioning
flagServerSide
flagClientSideRefresh
+ flagDeltaAllowed
)
// ChannelContext contains extra context for channel connection subscribed to.
@@ -527,11 +532,11 @@ func (c *Client) checkPong() {
func (c *Client) addPingUpdate(isFirst bool, scheduleNext bool) {
delay := c.pingInterval
if isFirst {
- // Send first ping in random interval between 0 and PingInterval to
+ // Send first ping in random interval between PingInterval/2 and PingInterval to
// spread ping-pongs in time (useful when many connections reconnect
// almost immediately).
pingNanoseconds := c.pingInterval.Nanoseconds()
- delay = time.Duration(randSource.Int63n(pingNanoseconds)) * time.Nanosecond
+ delay = time.Duration(pingNanoseconds/2) + time.Duration(randSource.Int63n(pingNanoseconds/2))*time.Nanosecond
}
c.nextPing = time.Now().Add(delay).UnixNano()
if scheduleNext {
@@ -737,16 +742,6 @@ func (c *Client) checkPosition(checkDelay time.Duration, ch string, chCtx Channe
historyMetaTTL = time.Duration(chCtx.metaTTLSeconds) * time.Second
}
- streamTop, err := c.node.streamTop(ch, historyMetaTTL)
- if err != nil {
- // Check later.
- return true
- }
-
- return c.isValidPosition(streamTop, nowUnix, ch)
-}
-
-func (c *Client) isValidPosition(streamTop StreamPosition, nowUnix int64, ch string) bool {
c.mu.Lock()
if c.status == statusClosed {
c.mu.Unlock()
@@ -760,18 +755,20 @@ func (c *Client) isValidPosition(streamTop StreamPosition, nowUnix int64, ch str
position := chCtx.streamPosition
c.mu.Unlock()
- isValidPosition := streamTop.Epoch == position.Epoch && position.Offset >= streamTop.Offset
- if isValidPosition {
+ validPosition, err := c.node.checkPosition(ch, position, historyMetaTTL)
+ if err != nil {
+ // Check later.
+ return true
+ }
+ if validPosition {
c.mu.Lock()
if chContext, ok := c.channels[ch]; ok {
chContext.positionCheckTime = nowUnix
c.channels[ch] = chContext
}
c.mu.Unlock()
- return true
}
-
- return false
+ return validPosition
}
// ID returns unique client connection id.
@@ -1613,6 +1610,17 @@ func (c *Client) handleSubscribe(req *protocol.SubscribeRequest, cmd *protocol.C
return ErrorNotAvailable
}
+ if req.Channel == "" {
+ return c.logDisconnectBadRequest("channel required for subscribe")
+ }
+
+ if req.Delta != "" {
+ _, ok := stringToDeltaType[req.Delta]
+ if !ok {
+ return c.logDisconnectBadRequest("unknown delta type in subscribe request: " + req.Delta)
+ }
+ }
+
replyError, disconnect := c.validateSubscribeRequest(req)
if disconnect != nil || replyError != nil {
if disconnect != nil {
@@ -2666,7 +2674,7 @@ type subscribeContext struct {
channelContext ChannelContext
}
-func isRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) {
+func isStreamRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) {
latestOffset := historyResult.Offset
latestEpoch := historyResult.Epoch
@@ -2689,6 +2697,26 @@ func isRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string)
return recoveredPubs, recovered
}
+func isCacheRecovered(latestPub *Publication, currentSP StreamPosition, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) {
+ latestOffset := currentSP.Offset
+ latestEpoch := currentSP.Epoch
+ var recovered bool
+ recoveredPubs := make([]*protocol.Publication, 0, 1)
+ if latestPub != nil {
+ publication := latestPub
+ recovered = publication.Offset == latestOffset
+ skipPublication := cmdOffset > 0 && cmdOffset == latestOffset && cmdEpoch == latestEpoch
+ if recovered && !skipPublication {
+ protoPub := pubToProto(publication)
+ recoveredPubs = append(recoveredPubs, protoPub)
+ }
+ } else if cmdOffset > 0 && latestOffset == cmdOffset && cmdEpoch == latestEpoch {
+ // Client already had state, which has not been modified since.
+ recovered = true
+ }
+ return recoveredPubs, recovered
+}
+
// subscribeCmd handles subscribe command - clients send this when subscribe
// on channel, if channel is private then we must validate provided sign here before
// actually subscribe client on channel. Optionally we can send missed messages to
@@ -2731,7 +2759,15 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
c.pubSubSync.StartBuffering(channel)
}
- err := c.node.addSubscription(channel, c)
+ sub := subInfo{client: c, deltaType: deltaTypeNone}
+ if req.Delta != "" {
+ dt := DeltaType(req.Delta)
+ if slices.Contains(reply.Options.AllowedDeltaTypes, dt) {
+ res.Delta = true
+ sub.deltaType = dt
+ }
+ }
+ err := c.node.addSubscription(channel, sub)
if err != nil {
c.node.logger.log(newLogEntry(LogLevelError, "error adding subscription", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
c.pubSubSync.StopBuffering(channel)
@@ -2759,6 +2795,16 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
)
if reply.Options.EnablePositioning || reply.Options.EnableRecovery {
+ handleErr := func(err error) subscribeContext {
+ c.pubSubSync.StopBuffering(channel)
+ var clientErr *Error
+ if errors.As(err, &clientErr) && !errors.Is(clientErr, ErrorInternal) {
+ return errorDisconnectContext(clientErr, nil)
+ }
+ ctx.disconnect = &DisconnectServerError
+ return ctx
+ }
+
res.Positioned = true
if reply.Options.EnableRecovery {
res.Recoverable = true
@@ -2767,45 +2813,74 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
if reply.Options.EnableRecovery && req.Recover {
cmdOffset := req.Offset
cmdEpoch := req.Epoch
+ recoveryMode := reply.Options.RecoveryMode
// Client provided subscribe request with recover flag on. Try to recover missed
- // publications automatically from history (we suppose here that history configured wisely).
- historyResult, err := c.node.recoverHistory(channel, StreamPosition{Offset: cmdOffset, Epoch: cmdEpoch}, reply.Options.HistoryMetaTTL)
- if err != nil {
- if errors.Is(err, ErrorUnrecoverablePosition) {
- // Result contains stream position in case of ErrorUnrecoverablePosition
- // during recovery.
+ // publications automatically from history (we assume here that the history is configured wisely).
+
+ if recoveryMode == RecoveryModeCache {
+ latestPub, currentSP, err := c.node.recoverCache(channel, reply.Options.HistoryMetaTTL)
+ if err != nil {
+ c.node.logger.log(newLogEntry(LogLevelError, "error on cache recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
+ return handleErr(err)
+ }
+ latestOffset = currentSP.Offset
+ latestEpoch = currentSP.Epoch
+ var recovered bool
+ recoveredPubs, recovered = isCacheRecovered(latestPub, currentSP, cmdOffset, cmdEpoch)
+ res.Recovered = recovered
+ if latestPub == nil && c.node.clientEvents.cacheEmptyHandler != nil {
+ cacheReply, err := c.node.clientEvents.cacheEmptyHandler(CacheEmptyEvent{Channel: channel})
+ if err != nil {
+ c.node.logger.log(newLogEntry(LogLevelError, "error on cache empty", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
+ return handleErr(err)
+ }
+ if cacheReply.Populated && !recovered {
+ // One more chance to recover in case we know cache was populated.
+ latestPub, currentSP, err = c.node.recoverCache(channel, reply.Options.HistoryMetaTTL)
+ if err != nil {
+ c.node.logger.log(newLogEntry(LogLevelError, "error on populated cache recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
+ return handleErr(err)
+ }
+ latestOffset = currentSP.Offset
+ latestEpoch = currentSP.Epoch
+ recoveredPubs, recovered = isCacheRecovered(latestPub, currentSP, cmdOffset, cmdEpoch)
+ res.Recovered = recovered
+ c.node.metrics.incRecover(res.Recovered)
+ } else {
+ c.node.metrics.incRecover(res.Recovered)
+ }
+ } else {
+ c.node.metrics.incRecover(res.Recovered)
+ }
+ } else {
+ historyResult, err := c.node.recoverHistory(channel, StreamPosition{Offset: cmdOffset, Epoch: cmdEpoch}, reply.Options.HistoryMetaTTL)
+ if err != nil {
+ if errors.Is(err, ErrorUnrecoverablePosition) {
+ // Result contains stream position in case of ErrorUnrecoverablePosition
+ // during recovery.
+ latestOffset = historyResult.Offset
+ latestEpoch = historyResult.Epoch
+ res.Recovered = false
+ c.node.metrics.incRecover(res.Recovered)
+ } else {
+ c.node.logger.log(newLogEntry(LogLevelError, "error on recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
+ return handleErr(err)
+ }
+ } else {
latestOffset = historyResult.Offset
latestEpoch = historyResult.Epoch
- res.Recovered = false
+ var recovered bool
+ recoveredPubs, recovered = isStreamRecovered(historyResult, cmdOffset, cmdEpoch)
+ res.Recovered = recovered
c.node.metrics.incRecover(res.Recovered)
- } else {
- c.node.logger.log(newLogEntry(LogLevelError, "error on recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
- c.pubSubSync.StopBuffering(channel)
- if clientErr, ok := err.(*Error); ok && clientErr != ErrorInternal {
- return errorDisconnectContext(clientErr, nil)
- }
- ctx.disconnect = &DisconnectServerError
- return ctx
}
- } else {
- latestOffset = historyResult.Offset
- latestEpoch = historyResult.Epoch
- var recovered bool
- recoveredPubs, recovered = isRecovered(historyResult, cmdOffset, cmdEpoch)
- res.Recovered = recovered
- c.node.metrics.incRecover(res.Recovered)
}
} else {
streamTop, err := c.node.streamTop(channel, reply.Options.HistoryMetaTTL)
if err != nil {
c.node.logger.log(newLogEntry(LogLevelError, "error getting stream state for channel", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()}))
- c.pubSubSync.StopBuffering(channel)
- if clientErr, ok := err.(*Error); ok && clientErr != ErrorInternal {
- return errorDisconnectContext(clientErr, nil)
- }
- ctx.disconnect = &DisconnectServerError
- return ctx
+ return handleErr(err)
}
latestOffset = streamTop.Offset
latestEpoch = streamTop.Epoch
@@ -2822,6 +2897,11 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
ctx.disconnect = &DisconnectInsufficientState
return ctx
}
+ if reply.Options.RecoveryMode == RecoveryModeCache && len(recoveredPubs) > 1 && req.Delta == "" {
+ // In RecoveryModeCache case client is only interested in last message. So if delta encoding is
+ // not used then we can only send the last publication.
+ recoveredPubs = recoveredPubs[len(recoveredPubs)-1:]
+ }
}
if len(recoveredPubs) > 0 {
@@ -2835,14 +2915,22 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
}
}
+ var channelFlags uint8
+
if res.Recovered {
// Only append recovered publications in case continuity in a channel can be achieved.
- res.Publications = recoveredPubs
- // In case of successful recovery attach stream position from request to subscribe response.
+ if res.Delta && req.Delta == string(DeltaTypeFossil) {
+ res.Publications = c.makeRecoveredPubsDeltaFossil(recoveredPubs)
+ // Allow delta for the following real-time publications since recovery is successful
+ // and makeRecoveredPubsDeltaFossil already created publication with base data if required.
+ channelFlags |= flagDeltaAllowed
+ } else {
+ res.Publications = recoveredPubs
+ }
+ // In case of successful recovery attach stream offset from request to subscribe response.
// This simplifies client implementation as it doesn't need to distinguish between cases when
// subscribe response has recovered publications, or it has no recovered publications.
// Valid stream position will be then caught up upon processing publications.
- res.Epoch = req.Epoch
res.Offset = req.Offset
}
res.WasRecovering = req.Recover
@@ -2867,7 +2955,6 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
defer c.handleCommandFinished(cmd, protocol.FrameTypeSubscribe, nil, protoReply, started)
}
- var channelFlags uint8
channelFlags |= flagSubscribed
if serverSide {
channelFlags |= flagServerSide
@@ -2928,6 +3015,53 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep
return ctx
}
+func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publication) []*protocol.Publication {
+ if len(recoveredPubs) == 0 {
+ return nil
+ }
+ prevPub := recoveredPubs[0]
+ if c.transport.Protocol() == ProtocolTypeJSON {
+ // For JSON case we need to use JSON string (js) for data.
+ pub := &protocol.Publication{
+ Offset: prevPub.Offset,
+ Info: prevPub.Info,
+ Tags: prevPub.Tags,
+ Data: json.Escape(convert.BytesToString(prevPub.Data)),
+ Delta: false,
+ }
+ recoveredPubs[0] = pub
+ }
+ // Probably during recovery we should not make deltas? This is something to investigate, in
+ // RecoveryModeCache case this won't be used since there is only one publication max recovered.
+ if len(recoveredPubs) > 1 {
+ for i, pub := range recoveredPubs[1:] {
+ patch := fdelta.Create(prevPub.Data, pub.Data)
+ var deltaPub *protocol.Publication
+ if c.transport.Protocol() == ProtocolTypeJSON {
+ // For JSON case we need to use JSON string (js) for patch.
+ deltaPub = &protocol.Publication{
+ Offset: pub.Offset,
+ Data: json.Escape(convert.BytesToString(patch)),
+ Info: pub.Info,
+ Tags: pub.Tags,
+ Delta: true,
+ }
+ } else {
+ deltaPub = &protocol.Publication{
+ Offset: pub.Offset,
+ Data: patch,
+ Info: pub.Info,
+ Tags: pub.Tags,
+ Delta: true,
+ }
+ }
+ recoveredPubs[i+1] = deltaPub
+ prevPub = pub
+ }
+ }
+ return recoveredPubs
+}
+
func (c *Client) releaseSubscribeCommandReply(reply *protocol.Reply) {
protocol.ReplyPool.ReleaseSubscribeReply(reply)
}
@@ -2965,38 +3099,50 @@ func (c *Client) handleAsyncUnsubscribe(ch string, unsub Unsubscribe) {
}
}
-func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publication, data []byte, sp StreamPosition) error {
+func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publication, prep preparedData, sp StreamPosition) error {
c.mu.Lock()
channelContext, ok := c.channels[ch]
if !ok || !channelHasFlag(channelContext.flags, flagSubscribed) {
c.mu.Unlock()
return nil
}
+ deltaAllowed := channelHasFlag(channelContext.flags, flagDeltaAllowed)
if !channelHasFlag(channelContext.flags, flagPositioning) {
+ // Publication with Offset, but client does not use positioning.
if hasFlag(c.transport.DisabledPushFlags(), PushFlagPublication) {
c.mu.Unlock()
return nil
}
c.mu.Unlock()
- return c.transportEnqueue(data, ch, protocol.FrameTypePushPublication)
+ if pub.Offset == math.MaxUint64 {
+ // This is a special pub to trigger insufficient state. Noop in non-positioning case.
+ return nil
+ }
+ if prep.deltaSub {
+ if deltaAllowed {
+ return c.transportEnqueue(prep.brokerDeltaData, ch, protocol.FrameTypePushPublication)
+ }
+ c.mu.Lock()
+ if chCtx, chCtxOK := c.channels[ch]; chCtxOK {
+ chCtx.flags |= flagDeltaAllowed
+ c.channels[ch] = chCtx
+ }
+ c.mu.Unlock()
+ }
+ return c.transportEnqueue(prep.fullData, ch, protocol.FrameTypePushPublication)
}
serverSide := channelHasFlag(channelContext.flags, flagServerSide)
currentPositionOffset := channelContext.streamPosition.Offset
nextExpectedOffset := currentPositionOffset + 1
pubOffset := pub.Offset
pubEpoch := sp.Epoch
- if pubEpoch != channelContext.streamPosition.Epoch {
+ if pubEpoch != channelContext.streamPosition.Epoch || pubOffset != nextExpectedOffset {
+ // We can introduce an option to mark connection with insufficient state flag instead
+ // of disconnecting it immediately. In that case connection will eventually reconnect
+ // due to periodic sync. While connection channel is in the insufficient state we must
+ // skip publications coming to it. This mode may be useful to spread the resubscribe load.
if c.node.logger.enabled(LogLevelDebug) {
- c.node.logger.log(newLogEntry(LogLevelDebug, "client insufficient state", map[string]any{"channel": ch, "user": c.user, "client": c.uid, "epoch": pubEpoch, "expectedEpoch": channelContext.streamPosition.Epoch}))
- }
- // Oops: sth lost, let client reconnect/resubscribe to recover its state.
- go func() { c.handleInsufficientState(ch, serverSide) }()
- c.mu.Unlock()
- return nil
- }
- if pubOffset != nextExpectedOffset {
- if c.node.logger.enabled(LogLevelDebug) {
- c.node.logger.log(newLogEntry(LogLevelDebug, "client insufficient state", map[string]any{"channel": ch, "user": c.user, "client": c.uid, "offset": pubOffset, "expectedOffset": nextExpectedOffset}))
+ c.node.logger.log(newLogEntry(LogLevelDebug, "client insufficient state", map[string]any{"channel": ch, "user": c.user, "client": c.uid, "epoch": pubEpoch, "expectedEpoch": channelContext.streamPosition.Epoch, "offset": pubOffset, "expectedOffset": nextExpectedOffset}))
}
// Oops: sth lost, let client reconnect/resubscribe to recover its state.
go func() { c.handleInsufficientState(ch, serverSide) }()
@@ -3010,10 +3156,25 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica
if hasFlag(c.transport.DisabledPushFlags(), PushFlagPublication) {
return nil
}
- return c.transportEnqueue(data, ch, protocol.FrameTypePushPublication)
+ if prep.deltaSub {
+ if deltaAllowed {
+ return c.transportEnqueue(prep.brokerDeltaData, ch, protocol.FrameTypePushPublication)
+ }
+ c.mu.Lock()
+ if chCtx, chCtxOK := c.channels[ch]; chCtxOK {
+ chCtx.flags |= flagDeltaAllowed
+ c.channels[ch] = chCtx
+ }
+ c.mu.Unlock()
+ }
+ return c.transportEnqueue(prep.fullData, ch, protocol.FrameTypePushPublication)
+}
+
+func (c *Client) writePublicationNoDelta(ch string, pub *protocol.Publication, data []byte, sp StreamPosition) error {
+ return c.writePublication(ch, pub, preparedData{fullData: data, brokerDeltaData: nil, localDeltaData: nil, deltaSub: false}, sp)
}
-func (c *Client) writePublication(ch string, pub *protocol.Publication, data []byte, sp StreamPosition) error {
+func (c *Client) writePublication(ch string, pub *protocol.Publication, prep preparedData, sp StreamPosition) error {
if c.node.LogEnabled(LogLevelTrace) {
c.traceOutPush(&protocol.Push{Channel: ch, Pub: pub})
}
@@ -3021,10 +3182,33 @@ func (c *Client) writePublication(ch string, pub *protocol.Publication, data []b
if hasFlag(c.transport.DisabledPushFlags(), PushFlagPublication) {
return nil
}
- return c.transportEnqueue(data, ch, protocol.FrameTypePushPublication)
+
+ if prep.deltaSub {
+ // For this path (no Offset) delta may come from channel medium layer, so that we can use it
+ // here if allowed for the connection.
+ c.mu.RLock()
+ channelContext, ok := c.channels[ch]
+ if !ok {
+ c.mu.RUnlock()
+ return nil
+ }
+ deltaAllowed := channelHasFlag(channelContext.flags, flagDeltaAllowed)
+ c.mu.RUnlock()
+
+ if deltaAllowed {
+ return c.transportEnqueue(prep.localDeltaData, ch, protocol.FrameTypePushPublication)
+ }
+ c.mu.Lock()
+ if chCtx, chCtxOK := c.channels[ch]; chCtxOK {
+ chCtx.flags |= flagDeltaAllowed
+ c.channels[ch] = chCtx
+ }
+ c.mu.Unlock()
+ }
+ return c.transportEnqueue(prep.fullData, ch, protocol.FrameTypePushPublication)
}
c.pubSubSync.SyncPublication(ch, pub, func() {
- _ = c.writePublicationUpdatePosition(ch, pub, data, sp)
+ _ = c.writePublicationUpdatePosition(ch, pub, prep, sp)
})
return nil
}
diff --git a/client_experimental.go b/client_experimental.go
index 000c7f21..bfcb355c 100644
--- a/client_experimental.go
+++ b/client_experimental.go
@@ -29,7 +29,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S
go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
return err
}
- return c.writePublication(channel, pub, jsonPush, sp)
+ return c.writePublicationNoDelta(channel, pub, jsonPush, sp)
} else {
push := &protocol.Push{Channel: channel, Pub: pub}
var err error
@@ -38,7 +38,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S
go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
return err
}
- return c.writePublication(channel, pub, jsonReply, sp)
+ return c.writePublicationNoDelta(channel, pub, jsonReply, sp)
}
} else if protoType == protocol.TypeProtobuf {
if c.transport.Unidirectional() {
@@ -48,7 +48,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S
if err != nil {
return err
}
- return c.writePublication(channel, pub, protobufPush, sp)
+ return c.writePublicationNoDelta(channel, pub, protobufPush, sp)
} else {
push := &protocol.Push{Channel: channel, Pub: pub}
var err error
@@ -56,7 +56,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S
if err != nil {
return err
}
- return c.writePublication(channel, pub, protobufReply, sp)
+ return c.writePublicationNoDelta(channel, pub, protobufReply, sp)
}
}
diff --git a/client_test.go b/client_test.go
index 4abded52..2fce87e8 100644
--- a/client_test.go
+++ b/client_test.go
@@ -648,6 +648,46 @@ func TestClientSubscribeBrokerErrorOnRecoverHistory(t *testing.T) {
}
}
+func TestClientSubscribeDeltaNotAllowed(t *testing.T) {
+ n := defaultTestNode()
+ defer func() { _ = n.Shutdown(context.Background()) }()
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ transport := newTestTransport(cancelFn)
+ transport.sink = make(chan []byte, 100)
+ transport.setProtocolType(ProtocolTypeJSON)
+ transport.setProtocolVersion(ProtocolVersion2)
+ client := newTestConnectedClientWithTransport(t, ctx, n, transport, "42")
+ rwWrapper := testReplyWriterWrapper()
+ err := client.handleSubscribe(&protocol.SubscribeRequest{
+ Channel: "test_channel",
+ Delta: string(DeltaTypeFossil),
+ }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(rwWrapper.replies))
+ require.Nil(t, rwWrapper.replies[0].Error)
+ res := extractSubscribeResult(rwWrapper.replies)
+ require.False(t, res.Delta)
+}
+
+func TestClientSubscribeUnknownDelta(t *testing.T) {
+ n := deltaTestNode()
+ defer func() { _ = n.Shutdown(context.Background()) }()
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ transport := newTestTransport(cancelFn)
+ transport.sink = make(chan []byte, 100)
+ transport.setProtocolType(ProtocolTypeJSON)
+ transport.setProtocolVersion(ProtocolVersion2)
+ client := newTestConnectedClientWithTransport(t, ctx, n, transport, "42")
+ rwWrapper := testReplyWriterWrapper()
+ err := client.handleSubscribe(&protocol.SubscribeRequest{
+ Channel: "test_channel",
+ Delta: "invalid",
+ }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw)
+ require.Equal(t, DisconnectBadRequest, err)
+}
+
func testUnexpectedOffsetEpochProtocolV2(t *testing.T, offset uint64, epoch string) {
t.Parallel()
broker := NewTestBroker()
@@ -676,9 +716,9 @@ func testUnexpectedOffsetEpochProtocolV2(t *testing.T, offset uint64, epoch stri
}, &protocol.Command{}, time.Now(), rwWrapper.rw)
require.NoError(t, err)
- err = node.handlePublication("test", &Publication{
+ err = node.handlePublication("test", StreamPosition{offset, epoch}, &Publication{
Offset: offset,
- }, StreamPosition{offset, epoch})
+ }, nil, nil)
require.NoError(t, err)
select {
@@ -1503,7 +1543,7 @@ func TestClientPublishNotAvailable(t *testing.T) {
type testBrokerEventHandler struct {
// Publication must register callback func to handle Publications received.
- HandlePublicationFunc func(ch string, pub *Publication, sp StreamPosition) error
+ HandlePublicationFunc func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error
// Join must register callback func to handle Join messages received.
HandleJoinFunc func(ch string, info *ClientInfo) error
// Leave must register callback func to handle Leave messages received.
@@ -1512,9 +1552,9 @@ type testBrokerEventHandler struct {
HandleControlFunc func([]byte) error
}
-func (b *testBrokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition) error {
+func (b *testBrokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
if b.HandlePublicationFunc != nil {
- return b.HandlePublicationFunc(ch, pub, sp)
+ return b.HandlePublicationFunc(ch, pub, sp, prevPub)
}
return nil
}
@@ -1560,7 +1600,7 @@ func TestClientPublishHandler(t *testing.T) {
connectClientV2(t, client)
node.broker.(*MemoryBroker).eventHandler = &testBrokerEventHandler{
- HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error {
+ HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
var msg testClientMessage
err := json.Unmarshal(pub.Data, &msg)
require.NoError(t, err)
@@ -2995,7 +3035,7 @@ func TestClientCheckPosition(t *testing.T) {
}
node.mu.Unlock()
- // no recover.
+ // no flagPositioning.
got := client.checkPosition(300*time.Second, "channel", ChannelContext{})
require.True(t, got)
@@ -3004,75 +3044,6 @@ func TestClientCheckPosition(t *testing.T) {
require.True(t, got)
}
-func TestClientIsValidPosition(t *testing.T) {
- node := defaultTestNode()
- defer func() { _ = node.Shutdown(context.Background()) }()
-
- client := newTestClient(t, node, "42")
-
- node.mu.Lock()
- node.nowTimeGetter = func() time.Time {
- return time.Unix(200, 0)
- }
- node.mu.Unlock()
-
- client.channels = map[string]ChannelContext{
- "example": {
- flags: flagSubscribed,
- positionCheckTime: 50,
- streamPosition: StreamPosition{
- Offset: 20,
- Epoch: "test",
- },
- },
- }
-
- got := client.isValidPosition(StreamPosition{
- Offset: 20,
- Epoch: "test",
- }, 200, "example")
- require.True(t, got)
- require.Equal(t, int64(200), client.channels["example"].positionCheckTime)
-
- got = client.isValidPosition(StreamPosition{
- Offset: 19,
- Epoch: "test",
- }, 210, "example")
- require.True(t, got)
- require.Equal(t, int64(210), client.channels["example"].positionCheckTime)
-
- got = client.isValidPosition(StreamPosition{
- Offset: 21,
- Epoch: "test",
- }, 220, "example")
- require.False(t, got)
- require.Equal(t, int64(210), client.channels["example"].positionCheckTime)
-
- client.channels = map[string]ChannelContext{
- "example": {
- positionCheckTime: 50,
- streamPosition: StreamPosition{
- Offset: 20,
- Epoch: "test",
- },
- },
- }
- // no subscribed flag.
- got = client.isValidPosition(StreamPosition{
- Offset: 21,
- Epoch: "test",
- }, 220, "example")
- require.True(t, got)
-
- _ = client.close(DisconnectConnectionClosed)
- // closed client.
- got = client.isValidPosition(StreamPosition{
- Offset: 21,
- Epoch: "test",
- }, 220, "example")
- require.True(t, got)
-}
-
func TestErrLogLevel(t *testing.T) {
require.Equal(t, LogLevelInfo, errLogLevel(ErrorNotAvailable))
require.Equal(t, LogLevelError, errLogLevel(errors.New("boom")))
@@ -3080,7 +3051,7 @@ func TestErrLogLevel(t *testing.T) {
func errLogLevel(err error) LogLevel {
logLevel := LogLevelInfo
- if err != ErrorNotAvailable {
+ if !errors.Is(err, ErrorNotAvailable) {
logLevel = LogLevelError
}
return logLevel
diff --git a/config.go b/config.go
index 5de49aab..8bf247b1 100644
--- a/config.go
+++ b/config.go
@@ -108,6 +108,12 @@ type Config struct {
// function for extracting channel_namespace label for transport_messages_received and
// transport_messages_received_size.
ChannelNamespaceLabelForTransportMessagesReceived bool
+
+ // GetChannelMediumOptions is a way to provide ChannelMediumOptions for specific channel.
+ // This function is called each time new channel appears on the Node. If it returns false
+ // then no medium layer will be used for the channel.
+ // See the doc comment for ChannelMediumOptions for more details about channel medium concept.
+ GetChannelMediumOptions func(channel string) (ChannelMediumOptions, bool)
}
const (
diff --git a/events.go b/events.go
index 7e88b9fb..0fe10e98 100644
--- a/events.go
+++ b/events.go
@@ -358,6 +358,25 @@ type HistoryHandler func(HistoryEvent, HistoryCallback)
// internal state. Returning a copy is important to avoid data races.
type StateSnapshotHandler func() (any, error)
+// CacheEmptyEvent is issued when recovery mode is used but Centrifuge can't
+// find Publication in history to recover from. This event allows application
+// to decide what to do in this case – it's possible to populate the cache by
+// sending actual data to a channel.
+type CacheEmptyEvent struct {
+ Channel string
+}
+
+// CacheEmptyReply contains fields determining the reaction on cache empty event.
+type CacheEmptyReply struct {
+ // Populated when set to true tells Centrifuge that cache was populated and
+ // in that case Centrifuge will try to recover missed Publication from history
+ // one more time.
+ Populated bool
+}
+
+// CacheEmptyHandler allows setting cache empty handler function.
+type CacheEmptyHandler func(CacheEmptyEvent) (CacheEmptyReply, error)
+
// SurveyEvent with Op and Data of survey.
type SurveyEvent struct {
Op string
diff --git a/go.mod b/go.mod
index 15009aa1..f0b13626 100644
--- a/go.mod
+++ b/go.mod
@@ -5,11 +5,12 @@ go 1.21
require (
github.com/FZambia/eagle v0.1.0
github.com/Yiling-J/theine-go v0.3.2
- github.com/centrifugal/protocol v0.12.1
+ github.com/centrifugal/protocol v0.13.0
github.com/google/uuid v1.6.0
github.com/prometheus/client_golang v1.19.1
- github.com/redis/rueidis v1.0.37-0.20240510165047-ebd66b7de128
+ github.com/redis/rueidis v1.0.37
github.com/segmentio/encoding v0.4.0
+ github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b
github.com/stretchr/testify v1.9.0
golang.org/x/sync v0.7.0
google.golang.org/protobuf v1.34.1
diff --git a/go.sum b/go.sum
index c0de5171..23b065e4 100644
--- a/go.sum
+++ b/go.sum
@@ -4,8 +4,8 @@ github.com/Yiling-J/theine-go v0.3.2 h1:XcSdMPV9DwBD9gqqSxbBfVJnP8CCiqNSqp3C6Ypm
github.com/Yiling-J/theine-go v0.3.2/go.mod h1:ygLXqrWPZT/a+PzK5hQ0+a6gu0lpAY5IudTcgnPleqI=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/centrifugal/protocol v0.12.1 h1:hGbIl9Y0UbVsESgLcsqgZ7duwEnrZebFUYdu5Opwzgo=
-github.com/centrifugal/protocol v0.12.1/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak=
+github.com/centrifugal/protocol v0.13.0 h1:3j9CWlbML5O9OlhLmSPWgptby0hDn4pQC9W+q6UiQQo=
+github.com/centrifugal/protocol v0.13.0/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -38,14 +38,16 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/redis/rueidis v1.0.37-0.20240510165047-ebd66b7de128 h1:wjJwFyRE8EYJVASGhwnQ7oI2uPdRrCGuPKiI8kpgGLE=
-github.com/redis/rueidis v1.0.37-0.20240510165047-ebd66b7de128/go.mod h1:bnbkk4+CkXZgDPEbUtSos/o55i4RhFYYesJ4DS2zmq0=
+github.com/redis/rueidis v1.0.37 h1:RBb1s97wcvlK94YZvyh+B/c6zOkc0ssamlfWRGfRlaw=
+github.com/redis/rueidis v1.0.37/go.mod h1:bnbkk4+CkXZgDPEbUtSos/o55i4RhFYYesJ4DS2zmq0=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/encoding v0.4.0 h1:MEBYvRqiUB2nfR2criEXWqwdY6HJOUrCn5hboVOVmy8=
github.com/segmentio/encoding v0.4.0/go.mod h1:/d03Cd8PoaDeceuhUUUQWjU0KhWjrmYrWPgtJHYZSnI=
+github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b h1:SCYeryKXBVdW38167VyumGakH+7E4Wxe6b/zxmQxwyM=
+github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b/go.mod h1:daNLfX/GJKuZyN4HkMf0h8dVmTmgRbBSkd9bFQyGNIo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
diff --git a/hub.go b/hub.go
index 7759b1d9..d0f7c067 100644
--- a/hub.go
+++ b/hub.go
@@ -5,7 +5,11 @@ import (
"io"
"sync"
+ "github.com/centrifugal/centrifuge/internal/convert"
+
"github.com/centrifugal/protocol"
+ "github.com/segmentio/encoding/json"
+ fdelta "github.com/shadowspore/fossil-delta"
)
const numHubShards = 64
@@ -86,15 +90,15 @@ func (h *Hub) remove(c *Client) error {
// Connections returns all user connections to the current Node.
func (h *Hub) Connections() map[string]*Client {
- conns := make(map[string]*Client)
+ connections := make(map[string]*Client)
for _, shard := range h.connShards {
shard.mu.RLock()
- for clientID, c := range shard.conns {
- conns[clientID] = c
+ for clientID, c := range shard.clients {
+ connections[clientID] = c
}
shard.mu.RUnlock()
}
- return conns
+ return connections
}
// UserConnections returns all user connections to the current Node.
@@ -118,8 +122,8 @@ func (h *Hub) disconnect(userID string, disconnect Disconnect, clientID, session
return h.connShards[index(userID, numHubShards)].disconnect(userID, disconnect, clientID, sessionID, whitelist)
}
-func (h *Hub) addSub(ch string, c *Client) (bool, error) {
- return h.subShards[index(ch, numHubShards)].addSub(ch, c)
+func (h *Hub) addSub(ch string, sub subInfo) (bool, error) {
+ return h.subShards[index(ch, numHubShards)].addSub(ch, sub)
}
// removeSub removes connection from clientHub subscriptions registry.
@@ -131,9 +135,13 @@ func (h *Hub) removeSub(ch string, c *Client) (bool, error) {
// Usually this is NOT what you need since in most cases you should use Node.Publish method which
// uses a Broker to deliver publications to all Nodes in a cluster and maintains publication history
// in a channel with incremental offset. By calling BroadcastPublication messages will only be sent
-// to the current node subscribers without any defined offset semantics.
+// to the current node subscribers without any defined offset semantics, without delta support.
func (h *Hub) BroadcastPublication(ch string, pub *Publication, sp StreamPosition) error {
- return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, pubToProto(pub), sp)
+ return h.broadcastPublication(ch, sp, pub, nil, nil)
+}
+
+func (h *Hub) broadcastPublication(ch string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error {
+ return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, sp, pub, prevPub, localPrevPub)
}
// broadcastJoin sends message to all clients subscribed on channel.
@@ -201,15 +209,15 @@ func (h *Hub) NumChannels() int {
type connShard struct {
mu sync.RWMutex
// match client ID with actual client connection.
- conns map[string]*Client
+ clients map[string]*Client
// registry to hold active client connections grouped by user.
users map[string]map[string]struct{}
}
func newConnShard() *connShard {
return &connShard{
- conns: make(map[string]*Client),
- users: make(map[string]map[string]struct{}),
+ clients: make(map[string]*Client),
+ users: make(map[string]map[string]struct{}),
}
}
@@ -225,8 +233,8 @@ func (h *connShard) shutdown(ctx context.Context, sem chan struct{}) error {
h.mu.RLock()
// At this moment node won't accept new client connections, so we can
// safely copy existing clients and release lock.
- clients := make([]*Client, 0, len(h.conns))
- for _, client := range h.conns {
+ clients := make([]*Client, 0, len(h.clients))
+ for _, client := range h.clients {
clients = append(clients, client)
}
h.mu.RUnlock()
@@ -394,16 +402,16 @@ func (h *connShard) userConnections(userID string) map[string]*Client {
return map[string]*Client{}
}
- conns := make(map[string]*Client, len(userConnections))
+ connections := make(map[string]*Client, len(userConnections))
for uid := range userConnections {
- c, ok := h.conns[uid]
+ c, ok := h.clients[uid]
if !ok {
continue
}
- conns[uid] = c
+ connections[uid] = c
}
- return conns
+ return connections
}
// Add connection into clientHub connections registry.
@@ -414,7 +422,7 @@ func (h *connShard) add(c *Client) error {
uid := c.ID()
user := c.UserID()
- h.conns[uid] = c
+ h.clients[uid] = c
if _, ok := h.users[user]; !ok {
h.users[user] = make(map[string]struct{})
@@ -431,7 +439,7 @@ func (h *connShard) remove(c *Client) error {
uid := c.ID()
user := c.UserID()
- delete(h.conns, uid)
+ delete(h.clients, uid)
// try to find connection to delete, return early if not found.
if _, ok := h.users[user]; !ok {
@@ -470,32 +478,49 @@ func (h *connShard) NumUsers() int {
return len(h.users)
}
+type DeltaType string
+
+const (
+ deltaTypeNone DeltaType = ""
+ // DeltaTypeFossil is Fossil delta encoding. See https://fossil-scm.org/home/doc/tip/www/delta_encoder_algorithm.wiki.
+ DeltaTypeFossil DeltaType = "fossil"
+)
+
+var stringToDeltaType = map[string]DeltaType{
+ "fossil": DeltaTypeFossil,
+}
+
+type subInfo struct {
+ client *Client
+ deltaType DeltaType
+}
+
type subShard struct {
mu sync.RWMutex
- // registry to hold active subscriptions of clients to channels.
- subs map[string]map[string]*Client
+ // registry to hold active subscriptions of clients to channels with some additional info.
+ subs map[string]map[string]subInfo
logger *logger
}
func newSubShard(logger *logger) *subShard {
return &subShard{
- subs: make(map[string]map[string]*Client),
+ subs: make(map[string]map[string]subInfo),
logger: logger,
}
}
// addSub adds connection into clientHub subscriptions registry.
-func (h *subShard) addSub(ch string, c *Client) (bool, error) {
+func (h *subShard) addSub(ch string, sub subInfo) (bool, error) {
h.mu.Lock()
defer h.mu.Unlock()
- uid := c.ID()
+ uid := sub.client.ID()
_, ok := h.subs[ch]
if !ok {
- h.subs[ch] = make(map[string]*Client)
+ h.subs[ch] = make(map[string]subInfo)
}
- h.subs[ch][uid] = c
+ h.subs[ch][uid] = sub
if !ok {
return true, nil
}
@@ -535,8 +560,95 @@ type encodeError struct {
error error
}
-// broadcastPublication sends message to all clients subscribed on channel.
-func (h *subShard) broadcastPublication(channel string, pub *protocol.Publication, sp StreamPosition) error {
+type preparedKey struct {
+ ProtocolType protocol.Type
+ Unidirectional bool
+ DeltaType DeltaType
+}
+
+type preparedData struct {
+ fullData []byte
+ brokerDeltaData []byte
+ localDeltaData []byte
+ deltaSub bool
+}
+
+func getDeltaPub(prevPub *Publication, fullPub *protocol.Publication, key preparedKey) *protocol.Publication {
+ deltaPub := fullPub
+ if prevPub != nil && key.DeltaType == DeltaTypeFossil {
+ patch := fdelta.Create(prevPub.Data, fullPub.Data)
+ if key.ProtocolType == protocol.TypeJSON {
+ deltaPub = &protocol.Publication{
+ Offset: fullPub.Offset,
+ Data: json.Escape(convert.BytesToString(patch)),
+ Info: fullPub.Info,
+ Tags: fullPub.Tags,
+ Delta: true,
+ }
+ } else {
+ deltaPub = &protocol.Publication{
+ Offset: fullPub.Offset,
+ Data: patch,
+ Info: fullPub.Info,
+ Tags: fullPub.Tags,
+ Delta: true,
+ }
+ }
+ } else if prevPub == nil && key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil {
+ // In JSON and Fossil case we need to send full state in JSON string format.
+ deltaPub = &protocol.Publication{
+ Offset: fullPub.Offset,
+ Data: json.Escape(convert.BytesToString(fullPub.Data)),
+ Info: fullPub.Info,
+ Tags: fullPub.Tags,
+ }
+ }
+ return deltaPub
+}
+
+func getDeltaData(sub subInfo, key preparedKey, channel string, deltaPub *protocol.Publication, jsonEncodeErr *encodeError) ([]byte, error) {
+ var deltaData []byte
+ if key.ProtocolType == protocol.TypeJSON {
+ if sub.client.transport.Unidirectional() {
+ push := &protocol.Push{Channel: channel, Pub: deltaPub}
+ var err error
+ deltaData, err = protocol.DefaultJsonPushEncoder.Encode(push)
+ if err != nil {
+ *jsonEncodeErr = encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
+ }
+ } else {
+ push := &protocol.Push{Channel: channel, Pub: deltaPub}
+ var err error
+ deltaData, err = protocol.DefaultJsonReplyEncoder.Encode(&protocol.Reply{Push: push})
+ if err != nil {
+ *jsonEncodeErr = encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
+ }
+ }
+ } else if key.ProtocolType == protocol.TypeProtobuf {
+ if sub.client.transport.Unidirectional() {
+ push := &protocol.Push{Channel: channel, Pub: deltaPub}
+ var err error
+ deltaData, err = protocol.DefaultProtobufPushEncoder.Encode(push)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ push := &protocol.Push{Channel: channel, Pub: deltaPub}
+ var err error
+ deltaData, err = protocol.DefaultProtobufReplyEncoder.Encode(&protocol.Reply{Push: push})
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return deltaData, nil
+}
+
+// broadcastPublication sends message to all clients subscribed on a channel.
+func (h *subShard) broadcastPublication(channel string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error {
+ fullPub := pubToProto(pub)
+ preparedDataByKey := make(map[preparedKey]preparedData)
+
h.mu.RLock()
defer h.mu.RUnlock()
@@ -546,70 +658,104 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio
}
var (
- jsonReply []byte
- protobufReply []byte
-
- jsonPush []byte
- protobufPush []byte
-
jsonEncodeErr *encodeError
)
- for _, c := range channelSubscribers {
- protoType := c.Transport().Protocol().toProto()
- if protoType == protocol.TypeJSON {
- if jsonEncodeErr != nil {
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
- continue
+ for _, sub := range channelSubscribers {
+ key := preparedKey{
+ ProtocolType: sub.client.Transport().Protocol().toProto(),
+ Unidirectional: sub.client.transport.Unidirectional(),
+ DeltaType: sub.deltaType,
+ }
+ prepValue, prepDataFound := preparedDataByKey[key]
+ if !prepDataFound {
+ var brokerDeltaPub *protocol.Publication
+ if fullPub.Offset > 0 {
+ brokerDeltaPub = getDeltaPub(prevPub, fullPub, key)
}
- if c.transport.Unidirectional() {
- if jsonPush == nil {
- push := &protocol.Push{Channel: channel, Pub: pub}
+ localDeltaPub := getDeltaPub(localPrevPub, fullPub, key)
+
+ var brokerDeltaData []byte
+ var localDeltaData []byte
+ if key.DeltaType != deltaTypeNone {
+ var err error
+ brokerDeltaData, err = getDeltaData(sub, key, channel, brokerDeltaPub, jsonEncodeErr)
+ if err != nil {
+ return err
+ }
+ localDeltaData, err = getDeltaData(sub, key, channel, localDeltaPub, jsonEncodeErr)
+ if err != nil {
+ return err
+ }
+ }
+
+ var fullData []byte
+
+ if key.ProtocolType == protocol.TypeJSON {
+ if sub.client.transport.Unidirectional() {
+ pubToUse := fullPub
+ if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil {
+ pubToUse = &protocol.Publication{
+ Offset: fullPub.Offset,
+ Data: json.Escape(convert.BytesToString(fullPub.Data)),
+ Info: fullPub.Info,
+ Tags: fullPub.Tags,
+ }
+ }
+ push := &protocol.Push{Channel: channel, Pub: pubToUse}
var err error
- jsonPush, err = protocol.DefaultJsonPushEncoder.Encode(push)
+ fullData, err = protocol.DefaultJsonPushEncoder.Encode(push)
if err != nil {
- jsonEncodeErr = &encodeError{client: c.ID(), user: c.UserID(), error: err}
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
- continue
+ jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
}
- }
- _ = c.writePublication(channel, pub, jsonPush, sp)
- } else {
- if jsonReply == nil {
- push := &protocol.Push{Channel: channel, Pub: pub}
+ } else {
+ pubToUse := fullPub
+ if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil {
+ pubToUse = &protocol.Publication{
+ Offset: fullPub.Offset,
+ Data: json.Escape(convert.BytesToString(fullPub.Data)),
+ Info: fullPub.Info,
+ Tags: fullPub.Tags,
+ }
+ }
+ push := &protocol.Push{Channel: channel, Pub: pubToUse}
var err error
- jsonReply, err = protocol.DefaultJsonReplyEncoder.Encode(&protocol.Reply{Push: push})
+ fullData, err = protocol.DefaultJsonReplyEncoder.Encode(&protocol.Reply{Push: push})
if err != nil {
- jsonEncodeErr = &encodeError{client: c.ID(), user: c.UserID(), error: err}
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
- continue
+ jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
}
}
- _ = c.writePublication(channel, pub, jsonReply, sp)
- }
- } else if protoType == protocol.TypeProtobuf {
- if c.transport.Unidirectional() {
- if protobufPush == nil {
- push := &protocol.Push{Channel: channel, Pub: pub}
+ } else if key.ProtocolType == protocol.TypeProtobuf {
+ if sub.client.transport.Unidirectional() {
+ push := &protocol.Push{Channel: channel, Pub: fullPub}
var err error
- protobufPush, err = protocol.DefaultProtobufPushEncoder.Encode(push)
+ fullData, err = protocol.DefaultProtobufPushEncoder.Encode(push)
if err != nil {
return err
}
- }
- _ = c.writePublication(channel, pub, protobufPush, sp)
- } else {
- if protobufReply == nil {
- push := &protocol.Push{Channel: channel, Pub: pub}
+ } else {
+ push := &protocol.Push{Channel: channel, Pub: fullPub}
var err error
- protobufReply, err = protocol.DefaultProtobufReplyEncoder.Encode(&protocol.Reply{Push: push})
+ fullData, err = protocol.DefaultProtobufReplyEncoder.Encode(&protocol.Reply{Push: push})
if err != nil {
return err
}
}
- _ = c.writePublication(channel, pub, protobufReply, sp)
}
+
+ prepValue = preparedData{
+ fullData: fullData,
+ brokerDeltaData: brokerDeltaData,
+ localDeltaData: localDeltaData,
+ deltaSub: key.DeltaType != deltaTypeNone,
+ }
+ preparedDataByKey[key] = prepValue
+ }
+ if sub.client.transport.Protocol() == ProtocolTypeJSON && jsonEncodeErr != nil {
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
+ continue
}
+ _ = sub.client.writePublication(channel, fullPub, prepValue, sp)
}
if jsonEncodeErr != nil && h.logger.enabled(LogLevelWarn) {
// Log that we had clients with inappropriate protocol, and point to the first such client.
@@ -643,40 +789,40 @@ func (h *subShard) broadcastJoin(channel string, join *protocol.Join) error {
jsonEncodeErr *encodeError
)
- for _, c := range channelSubscribers {
- protoType := c.Transport().Protocol().toProto()
+ for _, sub := range channelSubscribers {
+ protoType := sub.client.Transport().Protocol().toProto()
if protoType == protocol.TypeJSON {
if jsonEncodeErr != nil {
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
continue
}
- if c.transport.Unidirectional() {
+ if sub.client.transport.Unidirectional() {
if jsonPush == nil {
push := &protocol.Push{Channel: channel, Join: join}
var err error
jsonPush, err = protocol.DefaultJsonPushEncoder.Encode(push)
if err != nil {
- jsonEncodeErr = &encodeError{client: c.ID(), user: c.UserID(), error: err}
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
+ jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
continue
}
}
- _ = c.writeJoin(channel, join, jsonPush)
+ _ = sub.client.writeJoin(channel, join, jsonPush)
} else {
if jsonReply == nil {
push := &protocol.Push{Channel: channel, Join: join}
var err error
jsonReply, err = protocol.DefaultJsonReplyEncoder.Encode(&protocol.Reply{Push: push})
if err != nil {
- jsonEncodeErr = &encodeError{client: c.ID(), user: c.UserID(), error: err}
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
+ jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
continue
}
}
- _ = c.writeJoin(channel, join, jsonReply)
+ _ = sub.client.writeJoin(channel, join, jsonReply)
}
} else if protoType == protocol.TypeProtobuf {
- if c.transport.Unidirectional() {
+ if sub.client.transport.Unidirectional() {
if protobufPush == nil {
push := &protocol.Push{Channel: channel, Join: join}
var err error
@@ -685,7 +831,7 @@ func (h *subShard) broadcastJoin(channel string, join *protocol.Join) error {
return err
}
}
- _ = c.writeJoin(channel, join, protobufPush)
+ _ = sub.client.writeJoin(channel, join, protobufPush)
} else {
if protobufReply == nil {
push := &protocol.Push{Channel: channel, Join: join}
@@ -695,7 +841,7 @@ func (h *subShard) broadcastJoin(channel string, join *protocol.Join) error {
return err
}
}
- _ = c.writeJoin(channel, join, protobufReply)
+ _ = sub.client.writeJoin(channel, join, protobufReply)
}
}
}
@@ -731,40 +877,40 @@ func (h *subShard) broadcastLeave(channel string, leave *protocol.Leave) error {
jsonEncodeErr *encodeError
)
- for _, c := range channelSubscribers {
- protoType := c.Transport().Protocol().toProto()
+ for _, sub := range channelSubscribers {
+ protoType := sub.client.Transport().Protocol().toProto()
if protoType == protocol.TypeJSON {
if jsonEncodeErr != nil {
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
continue
}
- if c.transport.Unidirectional() {
+ if sub.client.transport.Unidirectional() {
if jsonPush == nil {
push := &protocol.Push{Channel: channel, Leave: leave}
var err error
jsonPush, err = protocol.DefaultJsonPushEncoder.Encode(push)
if err != nil {
- jsonEncodeErr = &encodeError{client: c.ID(), user: c.UserID(), error: err}
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
+ jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
continue
}
}
- _ = c.writeLeave(channel, leave, jsonPush)
+ _ = sub.client.writeLeave(channel, leave, jsonPush)
} else {
if jsonReply == nil {
push := &protocol.Push{Channel: channel, Leave: leave}
var err error
jsonReply, err = protocol.DefaultJsonReplyEncoder.Encode(&protocol.Reply{Push: push})
if err != nil {
- jsonEncodeErr = &encodeError{client: c.ID(), user: c.UserID(), error: err}
- go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c)
+ jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err}
+ go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client)
continue
}
}
- _ = c.writeLeave(channel, leave, jsonReply)
+ _ = sub.client.writeLeave(channel, leave, jsonReply)
}
} else if protoType == protocol.TypeProtobuf {
- if c.transport.Unidirectional() {
+ if sub.client.transport.Unidirectional() {
if protobufPush == nil {
push := &protocol.Push{Channel: channel, Leave: leave}
var err error
@@ -773,7 +919,7 @@ func (h *subShard) broadcastLeave(channel string, leave *protocol.Leave) error {
return err
}
}
- _ = c.writeLeave(channel, leave, protobufPush)
+ _ = sub.client.writeLeave(channel, leave, protobufPush)
} else {
if protobufReply == nil {
push := &protocol.Push{Channel: channel, Leave: leave}
@@ -783,7 +929,7 @@ func (h *subShard) broadcastLeave(channel string, leave *protocol.Leave) error {
return err
}
}
- _ = c.writeLeave(channel, leave, protobufReply)
+ _ = sub.client.writeLeave(channel, leave, protobufReply)
}
}
}
@@ -834,9 +980,9 @@ func (h *subShard) Channels() []string {
func (h *subShard) NumSubscribers(ch string) int {
h.mu.RLock()
defer h.mu.RUnlock()
- conns, ok := h.subs[ch]
+ clients, ok := h.subs[ch]
if !ok {
return 0
}
- return len(conns)
+ return len(clients)
}
diff --git a/hub_test.go b/hub_test.go
index 56d45c94..d06e9f67 100644
--- a/hub_test.go
+++ b/hub_test.go
@@ -10,6 +10,11 @@ import (
"testing"
"time"
+ "github.com/centrifugal/centrifuge/internal/convert"
+
+ "github.com/centrifugal/protocol"
+ "github.com/segmentio/encoding/json"
+ fdelta "github.com/shadowspore/fossil-delta"
"github.com/stretchr/testify/require"
)
@@ -444,10 +449,10 @@ func TestHubBroadcastPublication(t *testing.T) {
protocolVersion ProtocolVersion
uni bool
}{
- {name: "JSON-V2", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2},
- {name: "Protobuf-V2", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2},
- {name: "JSON-V2-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true},
- {name: "Protobuf-V2-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true},
+ {name: "JSON", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2},
+ {name: "Protobuf", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2},
+ {name: "JSON-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true},
+ {name: "Protobuf-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true},
}
for _, tc := range tcs {
@@ -496,6 +501,298 @@ func TestHubBroadcastPublication(t *testing.T) {
}
}
+func deltaTestNode() *Node {
+ n := defaultNodeNoHandlers()
+ n.OnConnect(func(client *Client) {
+ client.OnSubscribe(func(e SubscribeEvent, cb SubscribeCallback) {
+ cb(SubscribeReply{
+ Options: SubscribeOptions{
+ EnableRecovery: true,
+ RecoveryMode: RecoveryModeCache,
+ AllowedDeltaTypes: []DeltaType{DeltaTypeFossil},
+ },
+ }, nil)
+ })
+ client.OnPublish(func(e PublishEvent, cb PublishCallback) {
+ cb(PublishReply{}, nil)
+ })
+ })
+ return n
+}
+
+func deltaTestNodeNoRecovery() *Node {
+ n := defaultNodeNoHandlers()
+ n.OnConnect(func(client *Client) {
+ client.OnSubscribe(func(e SubscribeEvent, cb SubscribeCallback) {
+ cb(SubscribeReply{
+ Options: SubscribeOptions{
+ AllowedDeltaTypes: []DeltaType{DeltaTypeFossil},
+ },
+ }, nil)
+ })
+ client.OnPublish(func(e PublishEvent, cb PublishCallback) {
+ cb(PublishReply{}, nil)
+ })
+ })
+ return n
+}
+
+func newTestSubscribedClientWithTransportDelta(t *testing.T, ctx context.Context, n *Node, transport Transport, userID, chanID string, deltaType DeltaType) *Client {
+ client := newTestConnectedClientWithTransport(t, ctx, n, transport, userID)
+ subscribeClientDelta(t, client, chanID, deltaType)
+ require.True(t, n.hub.NumSubscribers(chanID) > 0)
+ require.Contains(t, client.channels, chanID)
+ return client
+}
+
+func subscribeClientDelta(t testing.TB, client *Client, ch string, deltaType DeltaType) *protocol.SubscribeResult {
+ rwWrapper := testReplyWriterWrapper()
+ err := client.handleSubscribe(&protocol.SubscribeRequest{
+ Channel: ch,
+ Delta: string(deltaType),
+ }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw)
+ require.NoError(t, err)
+ require.Nil(t, rwWrapper.replies[0].Error)
+ return rwWrapper.replies[0].Subscribe
+}
+
+func TestHubBroadcastPublicationDelta(t *testing.T) {
+ tcs := []struct {
+ name string
+ protocolType ProtocolType
+ protocolVersion ProtocolVersion
+ uni bool
+ }{
+ {name: "JSON", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2},
+ {name: "Protobuf", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2},
+ {name: "JSON-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true},
+ {name: "Protobuf-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true},
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ n := deltaTestNode()
+ n.config.GetChannelNamespaceLabel = func(channel string) string {
+ return channel
+ }
+ defer func() { _ = n.Shutdown(context.Background()) }()
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ transport := newTestTransport(cancelFn)
+ transport.sink = make(chan []byte, 100)
+ transport.setProtocolType(tc.protocolType)
+ transport.setProtocolVersion(tc.protocolVersion)
+ transport.setUnidirectional(tc.uni)
+ newTestSubscribedClientWithTransportDelta(
+ t, ctx, n, transport, "42", "test_channel", DeltaTypeFossil)
+
+ res, err := n.History("test_channel")
+ require.NoError(t, err)
+
+ err = n.hub.broadcastPublication(
+ "test_channel",
+ StreamPosition{Offset: 1, Epoch: res.StreamPosition.Epoch},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1},
+ nil,
+ nil,
+ )
+ require.NoError(t, err)
+
+ LOOP:
+ for {
+ select {
+ case data := <-transport.sink:
+ if strings.Contains(string(data), "broadcast_data") {
+ break LOOP
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("no data in sink")
+ }
+ }
+
+ // Broadcast same data to existing channel.
+ err = n.hub.broadcastPublication(
+ "test_channel",
+ StreamPosition{Offset: 2, Epoch: res.StreamPosition.Epoch},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 2},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1},
+ nil,
+ )
+ require.NoError(t, err)
+
+ LOOP2:
+ for {
+ select {
+ case data := <-transport.sink:
+ if strings.Contains(string(data), "broadcast_data") {
+ require.Fail(t, "should not receive same data twice - delta expected")
+ }
+ break LOOP2
+ case <-time.After(2 * time.Second):
+ t.Fatal("no data in sink 2")
+ }
+ }
+ })
+ }
+}
+
+func TestHubBroadcastPublicationDeltaAtMostOnce(t *testing.T) {
+ tcs := []struct {
+ name string
+ protocolType ProtocolType
+ protocolVersion ProtocolVersion
+ uni bool
+ }{
+ {name: "JSON", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2},
+ {name: "Protobuf", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2},
+ {name: "JSON-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true},
+ {name: "Protobuf-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true},
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ n := deltaTestNodeNoRecovery()
+ n.config.GetChannelNamespaceLabel = func(channel string) string {
+ return channel
+ }
+ defer func() { _ = n.Shutdown(context.Background()) }()
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ transport := newTestTransport(cancelFn)
+ transport.sink = make(chan []byte, 100)
+ transport.setProtocolType(tc.protocolType)
+ transport.setProtocolVersion(tc.protocolVersion)
+ transport.setUnidirectional(tc.uni)
+ newTestSubscribedClientWithTransportDelta(
+ t, ctx, n, transport, "42", "test_channel", DeltaTypeFossil)
+
+ res, err := n.History("test_channel")
+ require.NoError(t, err)
+
+ err = n.hub.broadcastPublication(
+ "test_channel",
+ StreamPosition{Offset: 1, Epoch: res.StreamPosition.Epoch},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1},
+ nil,
+ nil,
+ )
+ require.NoError(t, err)
+
+ LOOP:
+ for {
+ select {
+ case data := <-transport.sink:
+ if strings.Contains(string(data), "broadcast_data") {
+ break LOOP
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("no data in sink")
+ }
+ }
+
+ // Broadcast same data to existing channel.
+ err = n.hub.broadcastPublication(
+ "test_channel",
+ StreamPosition{Offset: 2, Epoch: res.StreamPosition.Epoch},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 2},
+ nil,
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1},
+ )
+ require.NoError(t, err)
+
+ LOOP2:
+ for {
+ select {
+ case data := <-transport.sink:
+ if strings.Contains(string(data), "broadcast_data") {
+ require.Fail(t, "should not receive same data twice - delta expected")
+ }
+ break LOOP2
+ case <-time.After(2 * time.Second):
+ t.Fatal("no data in sink 2")
+ }
+ }
+ })
+ }
+}
+
+func TestHubBroadcastPublicationDeltaAtMostOnceNoOffset(t *testing.T) {
+ tcs := []struct {
+ name string
+ protocolType ProtocolType
+ protocolVersion ProtocolVersion
+ uni bool
+ }{
+ {name: "JSON", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2},
+ {name: "Protobuf", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2},
+ {name: "JSON-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true},
+ {name: "Protobuf-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true},
+ }
+
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ n := deltaTestNodeNoRecovery()
+ n.config.GetChannelNamespaceLabel = func(channel string) string {
+ return channel
+ }
+ defer func() { _ = n.Shutdown(context.Background()) }()
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ transport := newTestTransport(cancelFn)
+ transport.sink = make(chan []byte, 100)
+ transport.setProtocolType(tc.protocolType)
+ transport.setProtocolVersion(tc.protocolVersion)
+ transport.setUnidirectional(tc.uni)
+ newTestSubscribedClientWithTransportDelta(
+ t, ctx, n, transport, "42", "test_channel", DeltaTypeFossil)
+
+ err := n.hub.broadcastPublication(
+ "test_channel",
+ StreamPosition{},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`)},
+ nil,
+ nil,
+ )
+ require.NoError(t, err)
+
+ LOOP:
+ for {
+ select {
+ case data := <-transport.sink:
+ if strings.Contains(string(data), "broadcast_data") {
+ break LOOP
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("no data in sink")
+ }
+ }
+
+ // Broadcast same data to existing channel.
+ err = n.hub.broadcastPublication(
+ "test_channel",
+ StreamPosition{},
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`)},
+ nil,
+ &Publication{Data: []byte(`{"data": "broadcast_data"}`)},
+ )
+ require.NoError(t, err)
+
+ LOOP2:
+ for {
+ select {
+ case data := <-transport.sink:
+ if strings.Contains(string(data), "broadcast_data") {
+ require.Fail(t, "should not receive same data twice - delta expected")
+ }
+ break LOOP2
+ case <-time.After(2 * time.Second):
+ t.Fatal("no data in sink 2")
+ }
+ }
+ })
+ }
+}
+
func TestHubBroadcastJoin(t *testing.T) {
tcs := []struct {
name string
@@ -621,8 +918,8 @@ func TestHubSubscriptions(t *testing.T) {
c, err := newClient(context.Background(), defaultTestNode(), newTestTransport(func() {}))
require.NoError(t, err)
- _, _ = h.addSub("test1", c)
- _, _ = h.addSub("test2", c)
+ _, _ = h.addSub("test1", subInfo{client: c, deltaType: ""})
+ _, _ = h.addSub("test2", subInfo{client: c, deltaType: ""})
require.Equal(t, 2, h.NumChannels())
require.Contains(t, h.Channels(), "test1")
require.Contains(t, h.Channels(), "test2")
@@ -664,7 +961,7 @@ func TestUserConnections(t *testing.T) {
_ = h.add(c)
connections := h.UserConnections(c.UserID())
- require.Equal(t, h.connShards[index(c.UserID(), numHubShards)].conns, connections)
+ require.Equal(t, h.connShards[index(c.UserID(), numHubShards)].clients, connections)
}
func TestHubSharding(t *testing.T) {
@@ -687,7 +984,7 @@ func TestHubSharding(t *testing.T) {
require.NoError(t, err)
_ = n.hub.add(c)
for _, ch := range channels {
- _, _ = n.hub.addSub(ch, c)
+ _, _ = n.hub.addSub(ch, subInfo{client: c, deltaType: ""})
}
}
}
@@ -726,7 +1023,7 @@ func BenchmarkHub_Contention(b *testing.B) {
_ = n.hub.add(c)
clients = append(clients, c)
for _, ch := range channels {
- _, _ = n.hub.addSub(ch, c)
+ _, _ = n.hub.addSub(ch, subInfo{client: c, deltaType: ""})
}
}
@@ -746,7 +1043,7 @@ func BenchmarkHub_Contention(b *testing.B) {
defer wg.Done()
_ = n.hub.BroadcastPublication(channels[(i+numChannels/2)%numChannels], pub, streamPosition)
}()
- _, _ = n.hub.addSub(channels[i%numChannels], clients[i%numClients])
+ _, _ = n.hub.addSub(channels[i%numChannels], subInfo{client: clients[i%numClients], deltaType: ""})
wg.Wait()
}
})
@@ -755,9 +1052,11 @@ func BenchmarkHub_Contention(b *testing.B) {
var broadcastBenches = []struct {
NumSubscribers int
}{
+ {1},
+ {10},
+ {100},
{1000},
{10000},
- {100000},
}
// BenchmarkHub_MassiveBroadcast allows estimating time to broadcast
@@ -768,27 +1067,20 @@ func BenchmarkHub_MassiveBroadcast(b *testing.B) {
for _, tt := range broadcastBenches {
numSubscribers := tt.NumSubscribers
- b.Run(fmt.Sprintf("%d", numSubscribers), func(b *testing.B) {
+ b.Run(fmt.Sprintf("subscribers_%d", numSubscribers), func(b *testing.B) {
b.ReportAllocs()
n := defaultTestNodeBenchmark(b)
- numChannels := 64
- channels := make([]string, 0, numChannels)
-
- for i := 0; i < numChannels; i++ {
- channels = append(channels, "broadcast"+strconv.Itoa(i))
- }
+ channel := "broadcast"
- sink := make(chan []byte, 1024)
+ sink := make(chan []byte, 10000)
for i := 0; i < numSubscribers; i++ {
t := newTestTransport(func() {})
t.setSink(sink)
c := newTestConnectedClientWithTransport(b, context.Background(), n, t, "12")
_ = n.hub.add(c)
- for _, ch := range channels {
- _, _ = n.hub.addSub(ch, c)
- }
+ _, _ = n.hub.addSub(channel, subInfo{client: c, deltaType: ""})
}
b.ResetTimer()
@@ -806,7 +1098,7 @@ func BenchmarkHub_MassiveBroadcast(b *testing.B) {
}
}
}()
- _ = n.hub.BroadcastPublication(channels[i%numChannels], pub, streamPosition)
+ _ = n.hub.broadcastPublication(channel, streamPosition, pub, nil, nil)
wg.Wait()
}
})
@@ -899,3 +1191,124 @@ func TestHubBroadcastInappropriateProtocol_Leave(t *testing.T) {
testFunc(client)
})
}
+
+var testJsonData = []byte(`{
+ "_id":"662fb7df5110d6e8e9942fb2",
+ "index":0,
+ "guid":"a100afc6-fc35-47fd-8e3e-e8e9a81629ec",
+ "isActive":true,
+ "balance":"$2,784.25",
+ "picture":"http://placehold.it/32x32",
+ "age":21,
+ "eyeColor":"green",
+ "name":"Lois Norris",
+ "gender":"female",
+ "company":"ORGANICA",
+ "email":"loisnorris@organica.com",
+ "phone":"+1 (939) 451-2349",
+ "address":"774 Ide Court, Sabillasville, Virginia, 4034",
+ "about":"Cupidatat reprehenderit laboris aute pariatur nulla exercitation. Commodo aliqua cupidatat consectetur aliquip. Id irure nisi qui ullamco culpa reprehenderit nisi sunt consequat ipsum. Velit officia sint id voluptate anim. Sunt duis duis consequat mollit incididunt laborum enim amet ad aliqua esse nulla. Aliqua nulla adipisicing ad aliquip ut. Nostrud mollit ex aute magna culpa ea exercitation qui ex.\r\n",
+ "registered":"2023-02-28T11:09:34 -02:00",
+ "latitude":24.054483,
+ "longitude":38.953522,
+ "tags":[
+ "consequat",
+ "adipisicing",
+ "eiusmod",
+ "ipsum",
+ "enim",
+ "et",
+ "voluptate"
+ ],
+ "friends":[
+ {
+ "id":0,
+ "name":"Kaufman Randall"
+ },
+ {
+ "id":1,
+ "name":"Byrd Cooley"
+ },
+ {
+ "id":2,
+ "name":"Obrien William"
+ }
+ ],
+ "greeting":"Hello, Lois Norris! You have 9 unread messages.",
+ "favoriteFruit":"banana"
+}`)
+
+// Has some changes (in tags field, in friends field).
+var testNewJsonData = []byte(`{
+ "_id":"662fb7df5110d6e8e9942fb2",
+ "index":0,
+ "guid":"a100afc6-fc35-47fd-8e3e-e8e9a81629ec",
+ "isActive":true,
+ "balance":"$2,784.25",
+ "picture":"http://placehold.it/32x32",
+ "age":21,
+ "eyeColor":"green",
+ "name":"Lois Norris",
+ "gender":"female",
+ "company":"ORGANICA",
+ "email":"loisnorris@organica.com",
+ "phone":"+1 (939) 451-2349",
+ "address":"774 Ide Court, Sabillasville, Virginia, 4034",
+ "about":"Cupidatat reprehenderit laboris aute pariatur nulla exercitation. Commodo aliqua cupidatat consectetur aliquip. Id irure nisi qui ullamco culpa reprehenderit nisi sunt consequat ipsum. Velit officia sint id voluptate anim. Sunt duis duis consequat mollit incididunt laborum enim amet ad aliqua esse nulla. Aliqua nulla adipisicing ad aliquip ut. Nostrud mollit ex aute magna culpa ea exercitation qui ex.\r\n",
+ "registered":"2023-02-28T11:09:34 -02:00",
+ "latitude":24.054483,
+ "longitude":38.953522,
+ "tags":[
+ "consequat",
+ "adipisicing",
+ "eiusmod"
+ ],
+ "friends":[
+ {
+ "id":0,
+ "name":"Kaufman Randall"
+ },
+ {
+ "id":1,
+ "name":"Byrd Cooley"
+ }
+ ],
+ "greeting":"Hello, Lois Norris! You have 9 unread messages.",
+ "favoriteFruit":"banana"
+}`)
+
+func TestJsonStringEncode(t *testing.T) {
+ testBenchmarkDeltaFossilPatch = fdelta.Create(testJsonData, testNewJsonData)
+ if len(testBenchmarkDeltaFossilPatch) == 0 {
+ t.Fatal("empty fossil patch")
+ }
+ testDeltaJsonData, err := json.Marshal(convert.BytesToString(testBenchmarkDeltaFossilPatch))
+ require.NoError(t, err)
+ require.NotNil(t, testDeltaJsonData)
+
+ alternativeDeltaJsonData := json.Escape(convert.BytesToString(testBenchmarkDeltaFossilPatch))
+ require.Equal(t, testDeltaJsonData, alternativeDeltaJsonData)
+}
+
+var testBenchmarkEncodeData []byte
+
+func BenchmarkEncodeJSONString(b *testing.B) {
+ jsonData := []byte(`{"input": "test"}`)
+ for i := 0; i < b.N; i++ {
+ testBenchmarkEncodeData = json.Escape(convert.BytesToString(jsonData))
+ if len(testBenchmarkEncodeData) == 0 {
+ b.Fatal("empty data")
+ }
+ }
+}
+
+var testBenchmarkDeltaFossilPatch []byte
+
+func BenchmarkDeltaFossil(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testBenchmarkDeltaFossilPatch = fdelta.Create(testJsonData, testNewJsonData)
+ if len(testBenchmarkDeltaFossilPatch) == 0 {
+ b.Fatal("empty fossil patch")
+ }
+ }
+}
diff --git a/internal/redis_lua/broker_history_add_list.lua b/internal/redis_lua/broker_history_add_list.lua
index 0e45b254..99bff5a1 100644
--- a/internal/redis_lua/broker_history_add_list.lua
+++ b/internal/redis_lua/broker_history_add_list.lua
@@ -9,6 +9,7 @@ local meta_expire = ARGV[5]
local new_epoch_if_empty = ARGV[6]
local publish_command = ARGV[7]
local result_key_expire = ARGV[8]
+local use_delta = ARGV[9]
if result_key_expire ~= '' then
local cached_result = redis.call("hmget", result_key, "e", "s")
@@ -30,12 +31,20 @@ if meta_expire ~= '0' then
redis.call("expire", meta_key, meta_expire)
end
+local prev_message_payload = ""
+if use_delta == "1" then
+ prev_message_payload = redis.call("lindex", list_key, 0) or ""
+end
+
local payload = "__" .. "p1:" .. top_offset .. ":" .. current_epoch .. "__" .. message_payload
redis.call("lpush", list_key, payload)
redis.call("ltrim", list_key, 0, ltrim_right_bound)
redis.call("expire", list_key, list_ttl)
if channel ~= '' then
+ if use_delta == "1" then
+ payload = "__" .. "d1:" .. top_offset .. ":" .. current_epoch .. ":" .. #prev_message_payload .. ":" .. prev_message_payload .. ":" .. #message_payload .. ":" .. message_payload
+ end
redis.call(publish_command, channel, payload)
end
diff --git a/internal/redis_lua/broker_history_add_stream.lua b/internal/redis_lua/broker_history_add_stream.lua
index 28251709..22065f33 100644
--- a/internal/redis_lua/broker_history_add_stream.lua
+++ b/internal/redis_lua/broker_history_add_stream.lua
@@ -9,6 +9,7 @@ local meta_expire = ARGV[5]
local new_epoch_if_empty = ARGV[6]
local publish_command = ARGV[7]
local result_key_expire = ARGV[8]
+local use_delta = ARGV[9]
if result_key_expire ~= '' then
local cached_result = redis.call("hmget", result_key, "e", "s")
@@ -30,11 +31,34 @@ if meta_expire ~= '0' then
redis.call("expire", meta_key, meta_expire)
end
+local prev_message_payload = ""
+if use_delta == "1" then
+ local prev_entries = redis.call("xrevrange", stream_key, "+", "-", "COUNT", 1)
+ if #prev_entries > 0 then
+ prev_message_payload = prev_entries[1][2]["d"]
+ local fields_and_values = prev_entries[1][2]
+ -- Loop through the fields and values to find the field "d"
+ for i = 1, #fields_and_values, 2 do
+ local field = fields_and_values[i]
+ local value = fields_and_values[i + 1]
+ if field == "d" then
+ prev_message_payload = value
+ break -- Stop the loop once we find the field "d"
+ end
+ end
+ end
+end
+
redis.call("xadd", stream_key, "MAXLEN", stream_size, top_offset, "d", message_payload)
redis.call("expire", stream_key, stream_ttl)
if channel ~= '' then
- local payload = "__" .. "p1:" .. top_offset .. ":" .. current_epoch .. "__" .. message_payload
+ local payload
+ if use_delta == "1" then
+ payload = "__" .. "d1:" .. top_offset .. ":" .. current_epoch .. ":" .. #prev_message_payload .. ":" .. prev_message_payload .. ":" .. #message_payload .. ":" .. message_payload
+ else
+ payload = "__" .. "p1:" .. top_offset .. ":" .. current_epoch .. "__" .. message_payload
+ end
redis.call(publish_command, channel, payload)
end
diff --git a/metrics.go b/metrics.go
index 4359a953..ff15a1ff 100644
--- a/metrics.go
+++ b/metrics.go
@@ -47,20 +47,21 @@ type metrics struct {
messagesSentCountLeave prometheus.Counter
messagesSentCountControl prometheus.Counter
- actionCountAddClient prometheus.Counter
- actionCountRemoveClient prometheus.Counter
- actionCountAddSub prometheus.Counter
- actionCountRemoveSub prometheus.Counter
- actionCountAddPresence prometheus.Counter
- actionCountRemovePresence prometheus.Counter
- actionCountPresence prometheus.Counter
- actionCountPresenceStats prometheus.Counter
- actionCountHistory prometheus.Counter
- actionCountHistoryRecover prometheus.Counter
- actionCountHistoryStreamTop prometheus.Counter
- actionCountHistoryRemove prometheus.Counter
- actionCountSurvey prometheus.Counter
- actionCountNotify prometheus.Counter
+ actionCountAddClient prometheus.Counter
+ actionCountRemoveClient prometheus.Counter
+ actionCountAddSub prometheus.Counter
+ actionCountRemoveSub prometheus.Counter
+ actionCountAddPresence prometheus.Counter
+ actionCountRemovePresence prometheus.Counter
+ actionCountPresence prometheus.Counter
+ actionCountPresenceStats prometheus.Counter
+ actionCountHistory prometheus.Counter
+ actionCountHistoryRecover prometheus.Counter
+ actionCountHistoryStreamTop prometheus.Counter
+ actionCountHistoryStreamTopLatestPub prometheus.Counter
+ actionCountHistoryRemove prometheus.Counter
+ actionCountSurvey prometheus.Counter
+ actionCountNotify prometheus.Counter
recoverCountYes prometheus.Counter
recoverCountNo prometheus.Counter
@@ -283,6 +284,8 @@ func (m *metrics) incActionCount(action string) {
m.actionCountHistoryRecover.Inc()
case "history_stream_top":
m.actionCountHistoryStreamTop.Inc()
+ case "history_stream_top_latest_pub":
+ m.actionCountHistoryStreamTopLatestPub.Inc()
case "history_remove":
m.actionCountHistoryRemove.Inc()
case "survey":
@@ -465,6 +468,7 @@ func initMetricsRegistry(registry prometheus.Registerer, metricsNamespace string
m.actionCountHistory = m.actionCount.WithLabelValues("history")
m.actionCountHistoryRecover = m.actionCount.WithLabelValues("history_recover")
m.actionCountHistoryStreamTop = m.actionCount.WithLabelValues("history_stream_top")
+ m.actionCountHistoryStreamTopLatestPub = m.actionCount.WithLabelValues("history_stream_top_latest_pub")
m.actionCountHistoryRemove = m.actionCount.WithLabelValues("history_remove")
m.actionCountSurvey = m.actionCount.WithLabelValues("survey")
m.actionCountNotify = m.actionCount.WithLabelValues("notify")
diff --git a/node.go b/node.go
index 3cc346de..9df9dfb0 100644
--- a/node.go
+++ b/node.go
@@ -83,6 +83,8 @@ type Node struct {
nodeInfoSendHandler NodeInfoSendHandler
emulationSurveyHandler *emulationSurveyHandler
+
+ mediums map[string]*channelMedium
}
const (
@@ -162,6 +164,7 @@ func New(c Config) (*Node, error) {
subDissolver: dissolve.New(numSubDissolverWorkers),
nowTimeGetter: nowtime.Get,
surveyRegistry: make(map[uint64]chan survey),
+ mediums: map[string]*channelMedium{},
}
n.emulationSurveyHandler = newEmulationSurveyHandler(n)
@@ -683,14 +686,14 @@ func (n *Node) handleControl(data []byte) error {
// handlePublication handles messages published into channel and
// coming from Broker. The goal of method is to deliver this message
// to all clients on this node currently subscribed to channel.
-func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition) error {
+func (n *Node) handlePublication(ch string, sp StreamPosition, pub, prevPub, localPrevPub *Publication) error {
n.metrics.incMessagesReceived("publication")
numSubscribers := n.hub.NumSubscribers(ch)
hasCurrentSubscribers := numSubscribers > 0
if !hasCurrentSubscribers {
return nil
}
- return n.hub.BroadcastPublication(ch, pub, sp)
+ return n.hub.broadcastPublication(ch, sp, pub, prevPub, localPrevPub)
}
// handleJoin handles join messages - i.e. broadcasts it to
@@ -971,19 +974,37 @@ func (n *Node) removeClient(c *Client) error {
// addSubscription registers subscription of connection on channel in both
// Hub and Broker.
-func (n *Node) addSubscription(ch string, c *Client) error {
+func (n *Node) addSubscription(ch string, sub subInfo) error {
n.metrics.incActionCount("add_subscription")
mu := n.subLock(ch)
mu.Lock()
defer mu.Unlock()
- first, err := n.hub.addSub(ch, c)
+ first, err := n.hub.addSub(ch, sub)
if err != nil {
return err
}
if first {
+ if n.config.GetChannelMediumOptions != nil {
+ mediumOptions, ok := n.config.GetChannelMediumOptions(ch)
+ if ok {
+ medium, err := newChannelMedium(ch, n, mediumOptions)
+ if err != nil {
+ return err
+ }
+ n.mediums[ch] = medium
+ }
+ }
+
err := n.broker.Subscribe(ch)
if err != nil {
- _, _ = n.hub.removeSub(ch, c)
+ _, _ = n.hub.removeSub(ch, sub.client)
+ if n.config.GetChannelMediumOptions != nil {
+ medium, ok := n.mediums[ch]
+ if ok {
+ medium.close()
+ delete(n.mediums, ch)
+ }
+ }
return err
}
}
@@ -1017,6 +1038,12 @@ func (n *Node) removeSubscription(ch string, c *Client) error {
if err != nil {
// Cool down a bit since broker is not ready to process unsubscription.
time.Sleep(500 * time.Millisecond)
+ } else {
+ medium, ok := n.mediums[ch]
+ if ok {
+ medium.close()
+ delete(n.mediums, ch)
+ }
}
return err
}
@@ -1337,6 +1364,29 @@ func (n *Node) recoverHistory(ch string, since StreamPosition, historyMetaTTL ti
}), WithHistoryMetaTTL(historyMetaTTL))
}
+// recoverCache recovers last publication in channel.
+func (n *Node) recoverCache(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) {
+ n.metrics.incActionCount("history_recover")
+ return n.streamTopLatestPub(ch, historyMetaTTL)
+}
+
+// streamTopLatestPub returns latest publication in channel with actual stream position.
+func (n *Node) streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) {
+ n.metrics.incActionCount("history_stream_top_latest_pub")
+ hr, err := n.History(ch, WithHistoryFilter(HistoryFilter{
+ Limit: 1,
+ Reverse: true,
+ }), WithHistoryMetaTTL(historyMetaTTL))
+ if err != nil {
+ return nil, StreamPosition{}, err
+ }
+ var latestPublication *Publication
+ if len(hr.Publications) > 0 {
+ latestPublication = hr.Publications[0]
+ }
+ return latestPublication, hr.StreamPosition, nil
+}
+
// streamTop returns current stream top StreamPosition for a channel.
func (n *Node) streamTop(ch string, historyMetaTTL time.Duration) (StreamPosition, error) {
n.metrics.incActionCount("history_stream_top")
@@ -1347,6 +1397,24 @@ func (n *Node) streamTop(ch string, historyMetaTTL time.Duration) (StreamPositio
return historyResult.StreamPosition, nil
}
+func (n *Node) checkPosition(ch string, clientPosition StreamPosition, historyMetaTTL time.Duration) (bool, error) {
+ mu := n.subLock(ch)
+ mu.Lock()
+ medium, ok := n.mediums[ch]
+ mu.Unlock()
+ if !ok || !medium.options.EnablePositionSync {
+ // No medium for channel or position sync disabled – we then check position over Broker.
+ streamTop, err := n.streamTop(ch, historyMetaTTL)
+ if err != nil {
+ // Will be checked later.
+ return false, err
+ }
+ return streamTop.Epoch == clientPosition.Epoch && clientPosition.Offset == streamTop.Offset, nil
+ }
+ validPosition := medium.CheckPosition(historyMetaTTL, clientPosition, n.config.ClientChannelPositionCheckDelay)
+ return validPosition, nil
+}
+
// RemoveHistory removes channel history.
func (n *Node) RemoveHistory(ch string) error {
n.metrics.incActionCount("history_remove")
@@ -1480,6 +1548,7 @@ type eventHub struct {
transportWriteHandler TransportWriteHandler
commandReadHandler CommandReadHandler
commandProcessedHandler CommandProcessedHandler
+ cacheEmptyHandler CacheEmptyHandler
}
// OnConnecting allows setting ConnectingHandler.
@@ -1512,16 +1581,34 @@ func (n *Node) OnCommandProcessed(handler CommandProcessedHandler) {
n.clientEvents.commandProcessedHandler = handler
}
+// OnCacheEmpty allows setting CacheEmptyHandler.
+// CacheEmptyHandler called when client subscribes on a channel with RecoveryModeCache but there is no
+// cached value in channel. In response to this handler it's possible to tell Centrifuge what to do with
+// subscribe request – keep it, or return error.
+func (n *Node) OnCacheEmpty(h CacheEmptyHandler) {
+ n.clientEvents.cacheEmptyHandler = h
+}
+
type brokerEventHandler struct {
node *Node
}
// HandlePublication coming from Broker.
-func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition) error {
+func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error {
if pub == nil {
panic("nil Publication received, this must never happen")
}
- return h.node.handlePublication(ch, pub, sp)
+ if h.node.config.GetChannelMediumOptions != nil {
+ mu := h.node.subLock(ch)
+ mu.Lock()
+ medium, ok := h.node.mediums[ch]
+ mu.Unlock()
+ if ok {
+ medium.broadcastPublication(pub, sp, prevPub)
+ return nil
+ }
+ }
+ return h.node.handlePublication(ch, sp, pub, prevPub, nil)
}
// HandleJoin coming from Broker.
diff --git a/node_test.go b/node_test.go
index 16805e89..a31b7edc 100644
--- a/node_test.go
+++ b/node_test.go
@@ -32,6 +32,8 @@ type TestBroker struct {
publishJoinCount int32
publishLeaveCount int32
publishControlCount int32
+
+ historyFunc func(_ string, _ HistoryOptions) ([]*Publication, StreamPosition, error)
}
func NewTestBroker() *TestBroker {
@@ -91,7 +93,10 @@ func (e *TestBroker) Unsubscribe(_ string) error {
return nil
}
-func (e *TestBroker) History(_ string, _ HistoryOptions) ([]*Publication, StreamPosition, error) {
+func (e *TestBroker) History(ch string, opts HistoryOptions) ([]*Publication, StreamPosition, error) {
+ if e.historyFunc != nil {
+ return e.historyFunc(ch, opts)
+ }
if e.errorOnHistory {
return nil, StreamPosition{}, errors.New("boom")
}
@@ -1170,7 +1175,7 @@ func TestBrokerEventHandler_PanicsOnNil(t *testing.T) {
defer func() { _ = node.Shutdown(context.Background()) }()
handler := &brokerEventHandler{node: node}
require.Panics(t, func() {
- _ = handler.HandlePublication("test", nil, StreamPosition{})
+ _ = handler.HandlePublication("test", nil, StreamPosition{}, nil)
})
require.Panics(t, func() {
_ = handler.HandleJoin("test", nil)
@@ -1344,3 +1349,37 @@ func TestNode_OnCommandRead(t *testing.T) {
require.Fail(t, "timeout subscribe")
}
}
+
+func TestNodeCheckPosition(t *testing.T) {
+ node := defaultTestNode()
+ defer func() { _ = node.Shutdown(context.Background()) }()
+
+ broker := NewTestBroker()
+ broker.historyFunc = func(channel string, opts HistoryOptions) ([]*Publication, StreamPosition, error) {
+ return nil, StreamPosition{
+ Offset: 20, Epoch: "test",
+ }, nil
+ }
+ node.SetBroker(broker)
+
+ isValid, err := node.checkPosition("test", StreamPosition{
+ Offset: 20,
+ Epoch: "test",
+ }, 200*time.Second)
+ require.NoError(t, err)
+ require.True(t, isValid)
+
+ isValid, err = node.checkPosition("test", StreamPosition{
+ Offset: 19,
+ Epoch: "test",
+ }, 200*time.Second)
+ require.NoError(t, err)
+ require.False(t, isValid)
+
+ isValid, err = node.checkPosition("test", StreamPosition{
+ Offset: 20,
+ Epoch: "test_new",
+ }, 200*time.Second)
+ require.NoError(t, err)
+ require.False(t, isValid)
+}
diff --git a/options.go b/options.go
index 1ed740f4..f995d00e 100644
--- a/options.go
+++ b/options.go
@@ -24,6 +24,13 @@ func WithIdempotencyKey(key string) PublishOption {
}
}
+// WithDelta tells Broker to use delta streaming.
+func WithDelta(enabled bool) PublishOption {
+ return func(opts *PublishOptions) {
+ opts.UseDelta = enabled
+ }
+}
+
// WithIdempotentResultTTL sets the time of expiration for results of idempotent publications.
// See PublishOptions.IdempotentResultTTL for more description and defaults.
func WithIdempotentResultTTL(ttl time.Duration) PublishOption {
@@ -78,6 +85,8 @@ type SubscribeOptions struct {
// Make sure you are using EnableRecovery in channels that maintain Publication
// history stream.
EnableRecovery bool
+ // RecoveryMode is by default RecoveryModeStream, but can be also RecoveryModeCache.
+ RecoveryMode RecoveryMode
// Data to send to a client with Subscribe Push.
Data []byte
// RecoverSince will try to subscribe a client and recover from a certain StreamPosition.
@@ -87,6 +96,12 @@ type SubscribeOptions struct {
// meta information expiration time.
HistoryMetaTTL time.Duration
+ // AllowedDeltaTypes is a whitelist of DeltaType subscribers can negotiate. At this point Centrifuge
+ // only supports DeltaTypeFossil. If zero value – clients won't be able to negotiate delta encoding
+ // within a channel and will receive full data in publications.
+ // Delta encoding is an EXPERIMENTAL feature and may be changed.
+ AllowedDeltaTypes []DeltaType
+
// clientID to subscribe.
clientID string
// sessionID to subscribe.
@@ -148,6 +163,20 @@ func WithRecovery(enabled bool) SubscribeOption {
}
}
+type RecoveryMode int32
+
+const (
+ RecoveryModeStream RecoveryMode = 0
+ RecoveryModeCache RecoveryMode = 1
+)
+
+// WithRecoveryMode ...
+func WithRecoveryMode(mode RecoveryMode) SubscribeOption {
+ return func(opts *SubscribeOptions) {
+ opts.RecoveryMode = mode
+ }
+}
+
// WithSubscribeClient allows setting client ID that should be subscribed.
// This option not used when Client.Subscribe called.
func WithSubscribeClient(clientID string) SubscribeOption {
diff --git a/options_test.go b/options_test.go
index 7a9ecb80..1a1adb1b 100644
--- a/options_test.go
+++ b/options_test.go
@@ -22,6 +22,13 @@ func TestWithIdempotencyKey(t *testing.T) {
require.Equal(t, "ik", opts.IdempotencyKey)
}
+func TestWithDelta(t *testing.T) {
+ opt := WithDelta(true)
+ opts := &PublishOptions{}
+ opt(opts)
+ require.True(t, opts.UseDelta)
+}
+
func TestWithIdempotentResultTTL(t *testing.T) {
opt := WithIdempotentResultTTL(time.Minute)
opts := &PublishOptions{}
@@ -48,6 +55,7 @@ func TestSubscribeOptions(t *testing.T) {
WithSubscribeSession("session"),
WithSubscribeClient("test"),
WithSubscribeSource(4),
+ WithRecoveryMode(RecoveryModeCache),
}
opts := &SubscribeOptions{}
for _, opt := range subscribeOpts {
@@ -63,6 +71,7 @@ func TestSubscribeOptions(t *testing.T) {
require.Equal(t, "test", opts.clientID)
require.Equal(t, "session", opts.sessionID)
require.Equal(t, uint8(4), opts.Source)
+ require.Equal(t, RecoveryModeCache, opts.RecoveryMode)
}
func TestWithDisconnect(t *testing.T) {