From 2b2306e0e95c008b22cfe5dafec73a58572b4988 Mon Sep 17 00:00:00 2001 From: Alexander Emelin Date: Tue, 26 Dec 2023 18:56:26 +0200 Subject: [PATCH 01/61] start prototyping --- _examples/chat_json/index.html | 8 ++- _examples/chat_json/main.go | 103 ++++++++++++++++----------------- _examples/go.mod | 2 + _examples/go.sum | 2 - client.go | 66 ++++++++++++++++----- go.mod | 2 + go.sum | 8 --- 7 files changed, 112 insertions(+), 79 deletions(-) diff --git a/_examples/chat_json/index.html b/_examples/chat_json/index.html index 76b7bfb4..ad25eb90 100644 --- a/_examples/chat_json/index.html +++ b/_examples/chat_json/index.html @@ -112,7 +112,7 @@ max-width: 90%; } - + + + + + +
+ +
+ + diff --git a/_examples/document/main.go b/_examples/document/main.go new file mode 100644 index 00000000..ddab0e95 --- /dev/null +++ b/_examples/document/main.go @@ -0,0 +1,114 @@ +package main + +import ( + "encoding/json" + "fmt" + "math/rand" + "time" +) + +type Event struct { + Type string + Minute int +} + +type Player struct { + Name string + Events []Event +} + +type Team struct { + Name string + Score int + Players [11]Player +} + +type Match struct { + HomeTeam Team + AwayTeam Team +} + +// Define event types +const ( + Goal = "goal" + YellowCard = "yellow card" + RedCard = "red card" + Substitute = "substitute" +) + +func simulateMatch(match *Match) { + fmt.Println("Match started between", match.HomeTeam.Name, "and", match.AwayTeam.Name) + + totalSimulationTime := 9 // Total time for the simulation in seconds + totalEvents := 20 // Total number of events to simulate + eventInterval := float64(totalSimulationTime) / float64(totalEvents) // Time between events + + totalBytes := 0 + + for i := 0; i < totalEvents; i++ { + time.Sleep(time.Duration(eventInterval*1000) * time.Millisecond) // Sleep between events + + minute := int(float64(i) * eventInterval / float64(totalSimulationTime) * 90) // Calculate minute based on event occurrence + eventType := chooseRandomEventType() + team := chooseRandomTeam(match) + playerIndex := rand.Intn(11) // Choose one of the 11 players randomly + playerName := team.Players[playerIndex].Name + + event := Event{Type: eventType, Minute: minute} + team.Players[playerIndex].Events = append(team.Players[playerIndex].Events, event) + + if eventType == Goal { + team.Score++ + fmt.Printf("[%d'] GOAL! %s by %s. New score is %s %d - %d %s\n", minute, team.Name, playerName, match.HomeTeam.Name, match.HomeTeam.Score, match.AwayTeam.Score, match.AwayTeam.Name) + } else { + fmt.Printf("[%d'] %s for %s\n", minute, eventType, playerName) + } + + data, _ := json.Marshal(match) + totalBytes += len(data) + } + + fmt.Println("Match ended. 
Final Score:", match.HomeTeam.Name, match.HomeTeam.Score, "-", match.AwayTeam.Score, match.AwayTeam.Name) + fmt.Println("Total bytes sent:", totalBytes) +} + +func chooseRandomEventType() string { + events := []string{Goal, YellowCard, RedCard, Substitute} + return events[rand.Intn(len(events))] +} + +func chooseRandomTeam(match *Match) *Team { + if rand.Intn(2) == 0 { + return &match.HomeTeam + } + return &match.AwayTeam +} + +// Helper function to create players with names from a given list +func assignNamesToPlayers(names []string) [11]Player { + var players [11]Player + for i, name := range names { + players[i] = Player{Name: name} + } + return players +} + +func main() { + // Predefined lists of player names for each team + playerNamesTeamA := []string{"John Doe", "Jane Smith", "Alex Johnson", "Chris Lee", "Pat Kim", "Sam Morgan", "Jamie Brown", "Casey Davis", "Morgan Garcia", "Taylor White", "Jordan Martinez"} + playerNamesTeamB := []string{"Robin Wilson", "Drew Taylor", "Jessie Bailey", "Casey Flores", "Jordan Walker", "Charlie Green", "Alex Adams", "Morgan Thompson", "Taylor Clark", "Jordan Hernandez", "Jamie Lewis"} + + // Example setup + match := Match{ + HomeTeam: Team{ + Name: "Team A", + Players: assignNamesToPlayers(playerNamesTeamA), + }, + AwayTeam: Team{ + Name: "Team B", + Players: assignNamesToPlayers(playerNamesTeamB), + }, + } + + simulateMatch(&match) +} diff --git a/client.go b/client.go index caa90598..87cea44b 100644 --- a/client.go +++ b/client.go @@ -2769,7 +2769,7 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep cmdEpoch := req.Epoch recoveryMode := reply.Options.RecoveryMode - if recoveryMode == RecoveryModeState { + if recoveryMode == RecoveryModeDocument { historyResult, err := c.node.History(channel, WithHistoryFilter(HistoryFilter{ Limit: 1, Reverse: true, diff --git a/options.go b/options.go index ed8dc7c3..6df4f93d 100644 --- a/options.go +++ b/options.go @@ -78,7 +78,7 @@ type SubscribeOptions 
struct { // Make sure you are using EnableRecovery in channels that maintain Publication // history stream. EnableRecovery bool - // RecoveryMode is by default RecoveryModeStream, but can be also RecoveryModeState. + // RecoveryMode is by default RecoveryModeStream, but can be also RecoveryModeDocument. RecoveryMode RecoveryMode // Data to send to a client with Subscribe Push. Data []byte @@ -153,8 +153,8 @@ func WithRecovery(enabled bool) SubscribeOption { type RecoveryMode int32 const ( - RecoveryModeStream RecoveryMode = 0 - RecoveryModeState RecoveryMode = 1 + RecoveryModeStream RecoveryMode = 0 + RecoveryModeDocument RecoveryMode = 1 ) // WithRecoveryMode ... From 89175412cd04c616cba018117afb795853afb50e Mon Sep 17 00:00:00 2001 From: FZambia Date: Sat, 23 Mar 2024 07:59:05 +0200 Subject: [PATCH 10/61] prototyping --- _examples/document/main.go | 37 ++++++++++++++++++++++++++++++++----- _examples/go.mod | 7 +++++-- _examples/go.sum | 8 ++++++++ go.mod | 4 ++-- go.sum | 8 ++++---- 5 files changed, 51 insertions(+), 13 deletions(-) diff --git a/_examples/document/main.go b/_examples/document/main.go index ddab0e95..3b2a4eb9 100644 --- a/_examples/document/main.go +++ b/_examples/document/main.go @@ -1,10 +1,15 @@ package main import ( + "encoding/base64" "encoding/json" "fmt" + "log" "math/rand" "time" + + jsonpatch "github.com/evanphx/json-patch/v5" + fdelta "github.com/shadowspore/fossil-delta" ) type Event struct { @@ -39,11 +44,13 @@ const ( func simulateMatch(match *Match) { fmt.Println("Match started between", match.HomeTeam.Name, "and", match.AwayTeam.Name) - totalSimulationTime := 9 // Total time for the simulation in seconds + totalSimulationTime := 1 // Total time for the simulation in seconds totalEvents := 20 // Total number of events to simulate eventInterval := float64(totalSimulationTime) / float64(totalEvents) // Time between events + var prevData []byte totalBytes := 0 + totalBytesSent := 0 for i := 0; i < totalEvents; i++ { 
time.Sleep(time.Duration(eventInterval*1000) * time.Millisecond) // Sleep between events @@ -66,10 +73,30 @@ func simulateMatch(match *Match) { data, _ := json.Marshal(match) totalBytes += len(data) + if prevData != nil { + + patch, err := jsonpatch.CreateMergePatch(prevData, data) + if err != nil { + log.Fatal(err) + } + //fmt.Println(string(patch)) + + patch = fdelta.Create(prevData, data) + //fmt.Println(string(patch)) + patch = []byte(base64.StdEncoding.EncodeToString(patch)) + + totalBytesSent += len(patch) + } else { + totalBytesSent += len(data) + } + prevData = data } - fmt.Println("Match ended. Final Score:", match.HomeTeam.Name, match.HomeTeam.Score, "-", match.AwayTeam.Score, match.AwayTeam.Name) - fmt.Println("Total bytes sent:", totalBytes) + fmt.Println( + "Match ended. Final Score:", + match.HomeTeam.Name, match.HomeTeam.Score, "-", match.AwayTeam.Score, match.AwayTeam.Name) + fmt.Println("Bytes without delta:", totalBytes) + fmt.Println("Bytes with delta:", totalBytesSent) } func chooseRandomEventType() string { @@ -101,11 +128,11 @@ func main() { // Example setup match := Match{ HomeTeam: Team{ - Name: "Team A", + Name: "Real Madrid", Players: assignNamesToPlayers(playerNamesTeamA), }, AwayTeam: Team{ - Name: "Team B", + Name: "Barcelona", Players: assignNamesToPlayers(playerNamesTeamB), }, } diff --git a/_examples/go.mod b/_examples/go.mod index cb8c4f48..a5090fb1 100644 --- a/_examples/go.mod +++ b/_examples/go.mod @@ -7,9 +7,10 @@ replace github.com/centrifugal/centrifuge => ../ require ( github.com/FZambia/tarantool v0.2.2 github.com/centrifugal/centrifuge v0.8.2 - github.com/centrifugal/protocol v0.11.1-0.20231217174539-d7863acb8224 + github.com/centrifugal/protocol v0.12.1-0.20240323055736-2bac99578c1f github.com/cristalhq/jwt/v3 v3.0.0 github.com/dchest/uniuri v1.2.0 + github.com/evanphx/json-patch/v5 v5.9.0 github.com/gin-contrib/sessions v0.0.3 github.com/gin-gonic/gin v1.9.1 github.com/gobwas/ws v1.3.1 @@ -21,11 +22,12 @@ require ( 
github.com/nats-io/nats.go v1.31.0 github.com/prometheus/client_golang v1.17.0 github.com/quic-go/quic-go v0.40.0 + github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b github.com/stretchr/testify v1.8.4 github.com/vmihailenco/msgpack/v5 v5.4.1 golang.org/x/oauth2 v0.14.0 google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 nhooyr.io/websocket v1.8.10 ) @@ -66,6 +68,7 @@ require ( github.com/nats-io/nuid v1.0.1 // indirect github.com/onsi/ginkgo/v2 v2.12.1 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pkg/errors v0.8.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect github.com/prometheus/common v0.44.0 // indirect diff --git a/_examples/go.sum b/_examples/go.sum index b82b4817..c761ba0d 100644 --- a/_examples/go.sum +++ b/_examples/go.sum @@ -16,6 +16,7 @@ github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/centrifugal/protocol v0.11.1-0.20231217174539-d7863acb8224 h1:msOUFVZH4p0o/GYiIqp8IDfx9CjlBewG09L6Gl3gyxs= github.com/centrifugal/protocol v0.11.1-0.20231217174539-d7863acb8224/go.mod h1:33nZhrA2iRoR6jT+oVzu1ARx+iWIgxOgYuZMhWMWVM4= +github.com/centrifugal/protocol v0.12.1-0.20240323055736-2bac99578c1f/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -28,6 +29,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/dchest/uniuri v1.2.0 h1:koIcOUdrTIivZgSLhHQvKgqdWZq5d7KdMEWF1Ud6+5g= github.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4kxhkY= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gin-contrib/sessions v0.0.3 h1:PoBXki+44XdJdlgDqDrY5nDVe3Wk7wDV/UCOuLP6fBI= @@ -130,6 +133,8 @@ github.com/onsi/ginkgo/v2 v2.12.1/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xl github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= @@ -154,6 +159,8 @@ github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/encoding v0.4.0 h1:MEBYvRqiUB2nfR2criEXWqwdY6HJOUrCn5hboVOVmy8= github.com/segmentio/encoding v0.4.0/go.mod h1:/d03Cd8PoaDeceuhUUUQWjU0KhWjrmYrWPgtJHYZSnI= +github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b h1:SCYeryKXBVdW38167VyumGakH+7E4Wxe6b/zxmQxwyM= +github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b/go.mod 
h1:daNLfX/GJKuZyN4HkMf0h8dVmTmgRbBSkd9bFQyGNIo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -226,6 +233,7 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= diff --git a/go.mod b/go.mod index 40d0ad08..654d27ea 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/FZambia/eagle v0.1.0 - github.com/centrifugal/protocol v0.11.0 + github.com/centrifugal/protocol v0.12.1-0.20240323055736-2bac99578c1f github.com/google/uuid v1.5.0 github.com/gorilla/websocket v1.5.0 github.com/igm/sockjs-go/v3 v3.0.2 @@ -13,7 +13,7 @@ require ( github.com/segmentio/encoding v0.4.0 github.com/stretchr/testify v1.8.4 golang.org/x/sync v0.5.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 ) require ( diff --git a/go.sum b/go.sum index 2badffdd..8de7a593 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/FZambia/eagle v0.1.0 h1:9gyX6x+xjoIfglgyPTcYm7dvY7FJ93us1QY5De4CyXA= github.com/FZambia/eagle v0.1.0/go.mod h1:YjGSPVkQTNcVLfzEUQJNgW9ScPR0K4u/Ky0yeFa4oDA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/centrifugal/protocol v0.11.0 h1:pQKfVT4c3/uiRNszaOenE4NqJqL4VBlzBku8skI739w= -github.com/centrifugal/protocol v0.11.0/go.mod h1:33nZhrA2iRoR6jT+oVzu1ARx+iWIgxOgYuZMhWMWVM4= +github.com/centrifugal/protocol v0.12.1-0.20240323055736-2bac99578c1f h1:lf8e3rcfAYSMC1GRPpSZWLYJ1Xb3iUbtVFehAGNr/S0= +github.com/centrifugal/protocol v0.12.1-0.20240323055736-2bac99578c1f/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -64,8 +64,8 @@ golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 322156416b131e7983d7c11a9a355b4390a67c8a Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 25 Mar 2024 18:07:37 +0200 Subject: [PATCH 11/61] continue prototyping --- 
_examples/chat_json/index.html | 4 +- _examples/chat_json/main.go | 123 ++++++------ _examples/go.mod | 3 +- _examples/go.sum | 3 +- client.go | 29 ++- client_experimental.go | 8 +- go.mod | 3 +- go.sum | 6 +- hub.go | 355 +++++++++++++++++++++++++++------ hub_test.go | 2 +- node.go | 35 ++-- 11 files changed, 403 insertions(+), 168 deletions(-) diff --git a/_examples/chat_json/index.html b/_examples/chat_json/index.html index f0ec4374..929ce5e0 100644 --- a/_examples/chat_json/index.html +++ b/_examples/chat_json/index.html @@ -112,7 +112,7 @@ max-width: 90%; } - + + + + +
+ +
+ + diff --git a/_examples/cache/main.go b/_examples/cache/main.go new file mode 100644 index 00000000..555970a6 --- /dev/null +++ b/_examples/cache/main.go @@ -0,0 +1,245 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + "math/rand" + "net/http" + "time" + + "github.com/centrifugal/centrifuge" +) + +type Event struct { + Type string + Minute int +} + +type Player struct { + Name string + Events []Event +} + +type Team struct { + Name string + Score int + Players [11]Player +} + +type Match struct { + Number int + HomeTeam Team + AwayTeam Team +} + +// Define event types +const ( + Goal = "goal" + YellowCard = "yellow card" + RedCard = "red card" + Substitute = "substitute" +) + +func simulateMatch(ctx context.Context, num int, node *centrifuge.Node) { + // Predefined lists of player names for each team + playerNamesTeamA := []string{"John Doe", "Jane Smith", "Alex Johnson", "Chris Lee", "Pat Kim", "Sam Morgan", "Jamie Brown", "Casey Davis", "Morgan Garcia", "Taylor White", "Jordan Martinez"} + playerNamesTeamB := []string{"Robin Wilson", "Drew Taylor", "Jessie Bailey", "Casey Flores", "Jordan Walker", "Charlie Green", "Alex Adams", "Morgan Thompson", "Taylor Clark", "Jordan Hernandez", "Jamie Lewis"} + + // Example setup + match := &Match{ + Number: num, + HomeTeam: Team{ + Name: "Real Madrid", + Players: assignNamesToPlayers(playerNamesTeamA), + }, + AwayTeam: Team{ + Name: "Barcelona", + Players: assignNamesToPlayers(playerNamesTeamB), + }, + } + + totalSimulationTime := 1 // Total time for the simulation in seconds + totalEvents := 20 // Total number of events to simulate + eventInterval := float64(totalSimulationTime) / float64(totalEvents) // Time between events + + r := rand.New(rand.NewSource(17)) + + for i := 0; i < totalEvents; i++ { + // Sleep between events + select { + case <-ctx.Done(): + return + case <-time.After(time.Duration(eventInterval*1000) * time.Millisecond): + } + + // Calculate minute based on event occurrence. 
+ minute := int(float64(i) * eventInterval / float64(totalSimulationTime) * 90) + eventType := chooseRandomEventType(r) + team := chooseRandomTeam(r, match) + playerIndex := r.Intn(11) // Choose one of the 11 players randomly + + event := Event{Type: eventType, Minute: minute} + team.Players[playerIndex].Events = append(team.Players[playerIndex].Events, event) + + if eventType == Goal { + team.Score++ + } + + data, _ := json.Marshal(match) + _, err := node.Publish( + "match:state:1", data, + centrifuge.WithDelta(true), + centrifuge.WithHistory(10, time.Minute), + ) + if err != nil { + log.Fatal(err) + } + } +} + +func chooseRandomEventType(r *rand.Rand) string { + events := []string{Goal, YellowCard, RedCard, Substitute} + return events[r.Intn(len(events))] +} + +func chooseRandomTeam(r *rand.Rand, match *Match) *Team { + if r.Intn(2) == 0 { + return &match.HomeTeam + } + return &match.AwayTeam +} + +// Helper function to create players with names from a given list +func assignNamesToPlayers(names []string) [11]Player { + var players [11]Player + for i, name := range names { + players[i] = Player{Name: name} + } + return players +} + +func auth(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + // Put authentication Credentials into request Context. + // Since we don't have any session backend here we simply + // set user ID as empty string. Users with empty ID called + // anonymous users, in real app you should decide whether + // anonymous users allowed to connect to your server or not. + cred := ¢rifuge.Credentials{ + UserID: "", + } + newCtx := centrifuge.SetCredentials(ctx, cred) + r = r.WithContext(newCtx) + h.ServeHTTP(w, r) + }) +} + +func main() { + // Node is the core object in Centrifuge library responsible for + // many useful things. For example Node allows publishing messages + // into channels with its Publish method. 
Here we initialize Node + // with Config which has reasonable defaults for zero values. + node, err := centrifuge.New(centrifuge.Config{ + LogLevel: centrifuge.LogLevelDebug, + LogHandler: func(entry centrifuge.LogEntry) { + log.Println(entry.Message, entry.Fields) + }, + }) + if err != nil { + log.Fatal(err) + } + + // Set ConnectHandler called when client successfully connected to Node. + // Your code inside a handler must be synchronized since it will be called + // concurrently from different goroutines (belonging to different client + // connections). See information about connection life cycle in library readme. + // This handler should not block – so do minimal work here, set required + // connection event handlers and return. + node.OnConnect(func(client *centrifuge.Client) { + // In our example transport will always be Websocket but it can be different. + transportName := client.Transport().Name() + // In our example clients connect with JSON protocol but it can also be Protobuf. + transportProto := client.Transport().Protocol() + log.Printf("client connected via %s (%s)", transportName, transportProto) + + //go func() { + // simulateMatch(client.Context(), 0, node) + //}() + + client.OnCacheEmpty(func(event centrifuge.CacheEmptyEvent) centrifuge.CacheEmptyReply { + simulateMatch(context.Background(), 0, node) + //go func() { + // num := 0 + // for { + // + // num++ + // time.Sleep(5 * time.Second) + // } + //}() + fmt.Println("simulated") + return centrifuge.CacheEmptyReply{} + }) + + // Set SubscribeHandler to react on every channel subscription attempt + // initiated by a client. Here you can theoretically return an error or + // disconnect a client from a server if needed. But here we just accept + // all subscriptions to all channels. In real life you may use a more + // complex permission check here. 
The reason why we use callback style + // inside client event handlers is that it gives a possibility to control + // operation concurrency to developer and still control order of events. + client.OnSubscribe(func(e centrifuge.SubscribeEvent, cb centrifuge.SubscribeCallback) { + log.Printf("client subscribes on channel %s", e.Channel) + cb(centrifuge.SubscribeReply{ + Options: centrifuge.SubscribeOptions{ + EnableRecovery: true, + RecoveryMode: centrifuge.RecoveryModeCache, + }, + }, nil) + }) + + // By default, clients can not publish messages into channels. By setting + // PublishHandler we tell Centrifuge that publish from a client-side is + // possible. Now each time client calls publish method this handler will be + // called and you have a possibility to validate publication request. After + // returning from this handler Publication will be published to a channel and + // reach active subscribers with at most once delivery guarantee. In our simple + // chat app we allow everyone to publish into any channel but in real case + // you may have more validation. + client.OnPublish(func(e centrifuge.PublishEvent, cb centrifuge.PublishCallback) { + log.Printf("client publishes into channel %s: %s", e.Channel, string(e.Data)) + cb(centrifuge.PublishReply{}, nil) + }) + + // Set Disconnect handler to react on client disconnect events. + client.OnDisconnect(func(e centrifuge.DisconnectEvent) { + log.Print("client disconnected", e.Code, e.Reason) + }) + }) + + // Run node. This method does not block. See also node.Shutdown method + // to finish application gracefully. + if err := node.Run(); err != nil { + log.Fatal(err) + } + + // Now configure HTTP routes. + + // Serve Websocket connections using WebsocketHandler. 
+ wsHandler := centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{ + //Compression: true, + //CompressionMinSize: 1, + //CompressionLevel: 1, + }) + http.Handle("/connection/websocket", auth(wsHandler)) + + // The second route is for serving index.html file. + http.Handle("/", http.FileServer(http.Dir("./"))) + + log.Printf("Starting server, visit http://localhost:8000") + if err := http.ListenAndServe("127.0.0.1:8000", nil); err != nil { + log.Fatal(err) + } +} diff --git a/_examples/cache/readme.md b/_examples/cache/readme.md new file mode 100644 index 00000000..ba76d9c0 --- /dev/null +++ b/_examples/cache/readme.md @@ -0,0 +1,21 @@ +Results with different configurations for total data sent over the interface from server to client, +caught with WireShark filter: + +``` +tcp.srcport == 8000 && websocket +``` + +| Protocol | Compression | Delta | Bytes sent | Percentage | +|--------------------|-------------|-----------|------------|------------| +| JSON over JSON | No | No | 29510 | 100.0 | +| JSON over JSON | Yes | No | 11135 | 37.73 | +| JSON over JSON | No | Yes | 6435 | 21.81 | +| JSON over JSON | Yes | Yes | 4963 | 16.82 | +| JSON over Protobuf | No | No | 28589 | 96.88 | +| JSON over Protobuf | Yes | No | 11133 | 37.73 | +| JSON over Protobuf | No | Yes | 4276 | 14.49 | +| JSON over Protobuf | Yes | Yes | 3454 | 11.70 | + +Note: since we send JSON over Protobuf, the JSON size is the same as the JSON over JSON case. +In this case Centrifugal protocol gives lower overhead, but the main part comes from the JSON payload size. +Another advantage of JSON over Protobuf is that we are not forced to use base64 encoding for delta case. diff --git a/_examples/chat_json/index.html b/_examples/chat_json/index.html index 929ce5e0..0369eb1e 100644 --- a/_examples/chat_json/index.html +++ b/_examples/chat_json/index.html @@ -224,7 +224,7 @@ // subscribe on channel and bind various event listeners. 
Actual // subscription request will be sent after client connects to // a server. - const sub = centrifuge.newSubscription(channel, {delta: 'jsonpatch'}); + const sub = centrifuge.newSubscription(channel, {delta: 'fossil'}); sub.on("publication", handlePublication) .on("join", handleJoin) diff --git a/_examples/document/index.html b/_examples/document/index.html deleted file mode 100644 index a33fd639..00000000 --- a/_examples/document/index.html +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - - - - - -
- -
- - diff --git a/_examples/document/main.go b/_examples/document/main.go deleted file mode 100644 index 3b2a4eb9..00000000 --- a/_examples/document/main.go +++ /dev/null @@ -1,141 +0,0 @@ -package main - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "log" - "math/rand" - "time" - - jsonpatch "github.com/evanphx/json-patch/v5" - fdelta "github.com/shadowspore/fossil-delta" -) - -type Event struct { - Type string - Minute int -} - -type Player struct { - Name string - Events []Event -} - -type Team struct { - Name string - Score int - Players [11]Player -} - -type Match struct { - HomeTeam Team - AwayTeam Team -} - -// Define event types -const ( - Goal = "goal" - YellowCard = "yellow card" - RedCard = "red card" - Substitute = "substitute" -) - -func simulateMatch(match *Match) { - fmt.Println("Match started between", match.HomeTeam.Name, "and", match.AwayTeam.Name) - - totalSimulationTime := 1 // Total time for the simulation in seconds - totalEvents := 20 // Total number of events to simulate - eventInterval := float64(totalSimulationTime) / float64(totalEvents) // Time between events - - var prevData []byte - totalBytes := 0 - totalBytesSent := 0 - - for i := 0; i < totalEvents; i++ { - time.Sleep(time.Duration(eventInterval*1000) * time.Millisecond) // Sleep between events - - minute := int(float64(i) * eventInterval / float64(totalSimulationTime) * 90) // Calculate minute based on event occurrence - eventType := chooseRandomEventType() - team := chooseRandomTeam(match) - playerIndex := rand.Intn(11) // Choose one of the 11 players randomly - playerName := team.Players[playerIndex].Name - - event := Event{Type: eventType, Minute: minute} - team.Players[playerIndex].Events = append(team.Players[playerIndex].Events, event) - - if eventType == Goal { - team.Score++ - fmt.Printf("[%d'] GOAL! %s by %s. 
New score is %s %d - %d %s\n", minute, team.Name, playerName, match.HomeTeam.Name, match.HomeTeam.Score, match.AwayTeam.Score, match.AwayTeam.Name) - } else { - fmt.Printf("[%d'] %s for %s\n", minute, eventType, playerName) - } - - data, _ := json.Marshal(match) - totalBytes += len(data) - if prevData != nil { - - patch, err := jsonpatch.CreateMergePatch(prevData, data) - if err != nil { - log.Fatal(err) - } - //fmt.Println(string(patch)) - - patch = fdelta.Create(prevData, data) - //fmt.Println(string(patch)) - patch = []byte(base64.StdEncoding.EncodeToString(patch)) - - totalBytesSent += len(patch) - } else { - totalBytesSent += len(data) - } - prevData = data - } - - fmt.Println( - "Match ended. Final Score:", - match.HomeTeam.Name, match.HomeTeam.Score, "-", match.AwayTeam.Score, match.AwayTeam.Name) - fmt.Println("Bytes without delta:", totalBytes) - fmt.Println("Bytes with delta:", totalBytesSent) -} - -func chooseRandomEventType() string { - events := []string{Goal, YellowCard, RedCard, Substitute} - return events[rand.Intn(len(events))] -} - -func chooseRandomTeam(match *Match) *Team { - if rand.Intn(2) == 0 { - return &match.HomeTeam - } - return &match.AwayTeam -} - -// Helper function to create players with names from a given list -func assignNamesToPlayers(names []string) [11]Player { - var players [11]Player - for i, name := range names { - players[i] = Player{Name: name} - } - return players -} - -func main() { - // Predefined lists of player names for each team - playerNamesTeamA := []string{"John Doe", "Jane Smith", "Alex Johnson", "Chris Lee", "Pat Kim", "Sam Morgan", "Jamie Brown", "Casey Davis", "Morgan Garcia", "Taylor White", "Jordan Martinez"} - playerNamesTeamB := []string{"Robin Wilson", "Drew Taylor", "Jessie Bailey", "Casey Flores", "Jordan Walker", "Charlie Green", "Alex Adams", "Morgan Thompson", "Taylor Clark", "Jordan Hernandez", "Jamie Lewis"} - - // Example setup - match := Match{ - HomeTeam: Team{ - Name: "Real Madrid", - Players: 
assignNamesToPlayers(playerNamesTeamA), - }, - AwayTeam: Team{ - Name: "Barcelona", - Players: assignNamesToPlayers(playerNamesTeamB), - }, - } - - simulateMatch(&match) -} diff --git a/_examples/go.mod b/_examples/go.mod index 5f9423a1..22f2e201 100644 --- a/_examples/go.mod +++ b/_examples/go.mod @@ -7,10 +7,9 @@ replace github.com/centrifugal/centrifuge => ../ require ( github.com/FZambia/tarantool v0.2.2 github.com/centrifugal/centrifuge v0.8.2 - github.com/centrifugal/protocol v0.12.1-0.20240324055340-a826f3aa7e88 + github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26 github.com/cristalhq/jwt/v5 v5.4.0 github.com/dchest/uniuri v1.2.0 - github.com/evanphx/json-patch/v5 v5.9.0 github.com/gin-contrib/sessions v0.0.3 github.com/gin-gonic/gin v1.9.1 github.com/gobwas/ws v1.3.1 @@ -22,7 +21,6 @@ require ( github.com/nats-io/nats.go v1.31.0 github.com/prometheus/client_golang v1.19.0 github.com/quic-go/quic-go v0.40.1 - github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b github.com/stretchr/testify v1.9.0 github.com/vmihailenco/msgpack/v5 v5.4.1 golang.org/x/oauth2 v0.16.0 @@ -40,6 +38,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-playground/locales v0.14.1 // indirect @@ -76,6 +75,7 @@ require ( github.com/redis/rueidis v1.0.31 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/segmentio/encoding v0.4.0 // indirect + github.com/shadowspore/fossil-delta v0.0.0-20240102155221-e3a8590b820b // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.11 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect diff --git a/_examples/go.sum b/_examples/go.sum index 
b7ea1874..3e2ac493 100644 --- a/_examples/go.sum +++ b/_examples/go.sum @@ -14,8 +14,8 @@ github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414 github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/centrifugal/protocol v0.12.1-0.20240324055340-a826f3aa7e88 h1:NTbCvqhbU58RYY7+cJtlZjSRos4XpHLTQAPuWAK9NiY= -github.com/centrifugal/protocol v0.12.1-0.20240324055340-a826f3aa7e88/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak= +github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26 h1:665ZHIkTLdwl0eOrD1suFZzd5bmDTRTVr4i5RZtrUog= +github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= diff --git a/broker.go b/broker.go index 462e741b..d1996525 100644 --- a/broker.go +++ b/broker.go @@ -35,7 +35,7 @@ type ClientInfo struct { // BrokerEventHandler can handle messages received from PUB/SUB system. type BrokerEventHandler interface { // HandlePublication to handle received Publications. - HandlePublication(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error + HandlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error // HandleJoin to handle received Join messages. HandleJoin(ch string, info *ClientInfo) error // HandleLeave to handle received Leave messages. 
diff --git a/broker_memory.go b/broker_memory.go index 41cf5487..0d73cc22 100644 --- a/broker_memory.go +++ b/broker_memory.go @@ -131,7 +131,7 @@ func (b *MemoryBroker) Publish(ch string, data []byte, opts PublishOptions) (Str } b.saveResultToCache(ch, opts.IdempotencyKey, streamTop, resultExpireSeconds) } - return streamTop, false, b.eventHandler.HandlePublication(ch, pub, streamTop, prevPub) + return streamTop, false, b.eventHandler.HandlePublication(ch, pub, streamTop, opts.UseDelta, prevPub) } streamPosition := StreamPosition{} if opts.IdempotencyKey != "" { @@ -141,7 +141,7 @@ func (b *MemoryBroker) Publish(ch string, data []byte, opts PublishOptions) (Str } b.saveResultToCache(ch, opts.IdempotencyKey, streamPosition, resultExpireSeconds) } - return streamPosition, false, b.eventHandler.HandlePublication(ch, pub, StreamPosition{}, prevPub) + return streamPosition, false, b.eventHandler.HandlePublication(ch, pub, StreamPosition{}, opts.UseDelta, prevPub) } func (b *MemoryBroker) getResultFromCache(ch string, key string) (StreamPosition, bool) { diff --git a/broker_memory_test.go b/broker_memory_test.go index aff3a46f..ebdb55ab 100644 --- a/broker_memory_test.go +++ b/broker_memory_test.go @@ -141,7 +141,7 @@ func TestMemoryBrokerPublishIdempotent(t *testing.T) { numPubs := 0 e.eventHandler = &testBrokerEventHandler{ - HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { numPubs++ return nil }, @@ -169,7 +169,7 @@ func TestMemoryBrokerPublishIdempotentWithHistory(t *testing.T) { numPubs := 0 e.eventHandler = &testBrokerEventHandler{ - HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { numPubs++ return nil }, diff --git a/broker_redis.go b/broker_redis.go index 
fa824f98..7ddd82f9 100644 --- a/broker_redis.go +++ b/broker_redis.go @@ -710,6 +710,11 @@ func (b *RedisBroker) publish(s *shardWrapper, ch string, data []byte, opts Publ script = b.addHistoryStreamScript } + var useDelta string + if opts.UseDelta { + useDelta = "1" + } + replies, err := script.Exec( context.Background(), s.shard.client, @@ -723,6 +728,7 @@ func (b *RedisBroker) publish(s *shardWrapper, ch string, data []byte, opts Publ strconv.FormatInt(time.Now().Unix(), 10), publishCommand, resultExpire, + useDelta, }, ).ToArray() if err != nil { @@ -996,7 +1002,7 @@ var ( ) func (b *RedisBroker) handleRedisClientMessage(eventHandler BrokerEventHandler, chID channelID, data []byte) error { - pushData, pushType, sp, ok := extractPushData(data) + pushData, pushType, sp, delta, prevPayload, ok := extractPushData(data) if !ok { return fmt.Errorf("malformed PUB/SUB data: %s", data) } @@ -1013,7 +1019,14 @@ func (b *RedisBroker) handleRedisClientMessage(eventHandler BrokerEventHandler, // it to unmarshalled Publication. pub.Offset = sp.Offset } - _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, nil) + var prevPub protocol.Publication + if delta && len(prevPayload) > 0 { + err = pub.UnmarshalVT(pushData) + if err != nil { + return err + } + } + _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, false, pubFromProto(&prevPub)) } else if pushType == joinPushType { var info protocol.ClientInfo err := info.UnmarshalVT(pushData) @@ -1199,7 +1212,7 @@ func (b *RedisBroker) historyList(s *RedisShard, ch string, filter HistoryFilter return nil, StreamPosition{}, errors.New("error getting value") } - pushData, _, sp, ok := extractPushData(convert.StringToBytes(value)) + pushData, _, sp, _, _, ok := extractPushData(convert.StringToBytes(value)) if !ok { return nil, StreamPosition{}, fmt.Errorf("malformed publication value: %s", value) } @@ -1281,15 +1294,15 @@ var ( ) // See tests for supported format examples. 
-func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool) { +func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool, []byte, bool) { var offset uint64 var epoch string if !bytes.HasPrefix(data, metaSep) { - return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, true + return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, true } nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep) if nextMetaSepPos <= 0 { - return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false + return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false } content := data[len(metaSep) : len(metaSep)+nextMetaSepPos] contentType := content[0] @@ -1298,28 +1311,117 @@ func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool) { switch contentType { case 'j': - return rest, joinPushType, StreamPosition{}, true + return rest, joinPushType, StreamPosition{}, false, nil, true case 'l': - return rest, leavePushType, StreamPosition{}, true + return rest, leavePushType, StreamPosition{}, false, nil, true } stringContent := convert.BytesToString(content) if contentType == 'p' { - // new format p1:offset:epoch + // p1:offset:epoch__payload stringContent = stringContent[3:] // offset:epoch epochDelimiterPos := strings.Index(stringContent, contentSep) if epochDelimiterPos <= 0 { - return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false + return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false } var err error offset, err = strconv.ParseUint(stringContent[:epochDelimiterPos], 10, 64) epoch = stringContent[epochDelimiterPos+1:] - return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, err == nil + return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, err == nil + } else if contentType == 'd' { + // 
d1:offset:epoch:prev_payload_length:prev_payload:payload_length:payload + parsedDelta, err := parseDeltaPush(stringContent) + return convert.StringToBytes(parsedDelta.Payload), pubPushType, StreamPosition{Epoch: parsedDelta.Epoch, Offset: parsedDelta.Offset}, true, convert.StringToBytes(parsedDelta.PrevPayload), err == nil } // old format with offset only: __offset__ var err error offset, err = strconv.ParseUint(stringContent, 10, 64) - return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, err == nil + return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, err == nil +} + +type deltaPublicationPush struct { + Offset uint64 + Epoch string + PrevPayloadLength int + PrevPayload string + PayloadLength int + Payload string +} + +func parseDeltaPush(input string) (*deltaPublicationPush, error) { + // d1:offset:epoch:prev_payload_length:prev_payload:payload_length:payload + const prefix = "d1:" + if !strings.HasPrefix(input, prefix) { + return nil, fmt.Errorf("input does not start with the expected prefix") + } + input = input[len(prefix):] // Remove prefix + + // offset:epoch:prev_payload_length:prev_payload:payload_length:payload + + idx := strings.IndexByte(input, ':') + if idx == -1 { + return nil, fmt.Errorf("invalid format, missing offset") + } + offset, err := strconv.ParseUint(input[:idx], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing offset: %v", err) + } + input = input[idx+1:] + + // epoch:prev_payload_length:prev_payload:payload_length:payload + + idx = strings.IndexByte(input, ':') + if idx == -1 { + return nil, fmt.Errorf("invalid format, missing epoch") + } + epoch := input[:idx] + input = input[idx+1:] + + // prev_payload_length:prev_payload:payload_length:payload + + idx = strings.IndexByte(input, ':') + if idx == -1 { + return nil, fmt.Errorf("invalid format, missing prev payload length") + } + prevPayloadLength, err := strconv.Atoi(input[:idx]) + if err != nil { + return nil, 
fmt.Errorf("error parsing prev payload length: %v", err) + } + + input = input[idx+1:] + + // Extract prev_payload based on prev_payload_length + if len(input) < prevPayloadLength { + return nil, fmt.Errorf("input is shorter than expected prev payload length") + } + prevPayload := input[:prevPayloadLength] + input = input[prevPayloadLength+1:] + + // payload_length:payload + idx = strings.IndexByte(input, ':') + if idx == -1 { + return nil, fmt.Errorf("invalid format, missing payload") + } + payloadLength, err := strconv.Atoi(input[:idx]) + if err != nil { + return nil, fmt.Errorf("error parsing payload_length: %v", err) + } + input = input[idx+1:] + + // Extract payload based on payload_length + if len(input) < payloadLength { + return nil, fmt.Errorf("input is shorter than expected payload length") + } + payload := input[:payloadLength] + + return &deltaPublicationPush{ + Offset: offset, + Epoch: epoch, + PrevPayloadLength: prevPayloadLength, + PrevPayload: prevPayload, + PayloadLength: payloadLength, + Payload: payload, + }, nil } diff --git a/broker_redis_test.go b/broker_redis_test.go index f805c6e7..eec4ff74 100644 --- a/broker_redis_test.go +++ b/broker_redis_test.go @@ -684,7 +684,7 @@ func TestRedisBrokerHandlePubSubMessage(t *testing.T) { b := NewTestRedisBroker(t, node, getUniquePrefix(), false) defer func() { _ = node.Shutdown(context.Background()) }() defer stopRedisBroker(b) - err := b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + err := b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { require.Equal(t, "test", ch) require.Equal(t, uint64(16901), sp.Offset) require.Equal(t, "xyz", sp.Epoch) @@ -692,7 +692,7 @@ func TestRedisBrokerHandlePubSubMessage(t *testing.T) { }}, b.messageChannelID(b.shards[0].shard, "test"), []byte("__p1:16901:xyz__dsdsd")) 
require.Error(t, err) - err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { return nil }}, b.messageChannelID(b.shards[0].shard, "test"), []byte("__p1:16901")) require.Error(t, err) @@ -703,7 +703,7 @@ func TestRedisBrokerHandlePubSubMessage(t *testing.T) { data, err := pub.MarshalVT() require.NoError(t, err) var publicationHandlerCalled bool - err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + err = b.handleRedisClientMessage(&testBrokerEventHandler{HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { publicationHandlerCalled = true require.Equal(t, "test", ch) require.Equal(t, uint64(16901), sp.Offset) @@ -744,7 +744,7 @@ func BenchmarkRedisExtractPushData(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - _, _, sp, ok := extractPushData(data) + _, _, sp, _, _, ok := extractPushData(data) if !ok { b.Fatal("wrong data") } @@ -759,7 +759,7 @@ func BenchmarkRedisExtractPushData(b *testing.B) { func TestRedisExtractPushData(t *testing.T) { data := []byte(`__p1:16901:xyz.123__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, ok := extractPushData(data) + pushData, pushType, sp, _, _, ok := extractPushData(data) require.True(t, ok) require.Equal(t, pubPushType, pushType) require.Equal(t, uint64(16901), sp.Offset) @@ -767,7 +767,7 @@ func TestRedisExtractPushData(t *testing.T) { require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": 
\"Alexander\"}\"\x00`), pushData) data = []byte(`__16901__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, ok = extractPushData(data) + pushData, pushType, sp, _, _, ok = extractPushData(data) require.True(t, ok) require.Equal(t, pubPushType, pushType) require.Equal(t, uint64(16901), sp.Offset) @@ -775,39 +775,39 @@ func TestRedisExtractPushData(t *testing.T) { require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, ok = extractPushData(data) + pushData, pushType, sp, _, _, ok = extractPushData(data) require.True(t, ok) require.Equal(t, pubPushType, pushType) require.Equal(t, uint64(0), sp.Offset) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`__4294967337__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, ok = extractPushData(data) + pushData, pushType, sp, _, _, ok = extractPushData(data) require.True(t, ok) require.Equal(t, pubPushType, pushType) require.Equal(t, uint64(4294967337), sp.Offset) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`__j__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, ok = extractPushData(data) + pushData, pushType, sp, _, _, ok = 
extractPushData(data) require.True(t, ok) require.Equal(t, joinPushType, pushType) require.Equal(t, uint64(0), sp.Offset) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`__l__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, ok = extractPushData(data) + pushData, pushType, sp, _, _, ok = extractPushData(data) require.True(t, ok) require.Equal(t, leavePushType, pushType) require.Equal(t, uint64(0), sp.Offset) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`____\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - _, _, _, ok = extractPushData(data) + _, _, _, _, _, ok = extractPushData(data) require.False(t, ok) data = []byte(`__a__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - _, _, _, ok = extractPushData(data) + _, _, _, _, _, ok = extractPushData(data) require.False(t, ok) } @@ -973,7 +973,7 @@ func TestRedisPubSubTwoNodes(t *testing.T) { HandleControlFunc: func(bytes []byte) error { return nil }, - HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { c := atomic.AddInt64(&numPublications, 1) if c == int64(msgNum) { close(pubCh) @@ -1080,7 +1080,7 @@ func TestRedisClusterShardedPubSub(t *testing.T) { HandleControlFunc: func(bytes []byte) error { return nil }, - HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + 
HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { c := atomic.AddInt64(&numPublications, 1) if c == int64(msgNum) { close(pubCh) @@ -1536,7 +1536,7 @@ func testRedisClientSubscribeRecover(t *testing.T, tt recoverTest, useStreams bo historyResult, err := node.recoverHistory(channel, StreamPosition{tt.SinceOffset, streamTop.Epoch}, 0) require.NoError(t, err) - recoveredPubs, recovered := isRecovered(historyResult, tt.SinceOffset, streamTop.Epoch) + recoveredPubs, recovered := isStreamRecovered(historyResult, tt.SinceOffset, streamTop.Epoch) require.Equal(t, tt.NumRecovered, len(recoveredPubs)) require.Equal(t, tt.Recovered, recovered) } @@ -1708,7 +1708,7 @@ func BenchmarkPubSubThroughput(b *testing.B) { HandleControlFunc: func(bytes []byte) error { return nil }, - HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { pubCh <- struct{}{} return nil }, @@ -1883,3 +1883,76 @@ func TestPreShardedSlots(t *testing.T) { }) } } + +func TestParseDeltaPush(t *testing.T) { + tests := []struct { + name string + input string + expectError bool + expectedResult *deltaPublicationPush + }{ + { + name: "valid data with colon in payload", + input: "d1:1234567890:epoch1:4:test:18:payload:with:colon", + expectError: false, + expectedResult: &deltaPublicationPush{ + Offset: 1234567890, + Epoch: "epoch1", + PrevPayloadLength: 4, + PrevPayload: "test", + PayloadLength: 18, + Payload: "payload:with:colon", + }, + }, + { + name: "valid data with empty payload", + input: "d1:1234567890:epoch2:0::0:", + expectError: false, + expectedResult: &deltaPublicationPush{ + Offset: 1234567890, + Epoch: "epoch2", + PrevPayloadLength: 0, + PrevPayload: "", + PayloadLength: 0, + Payload: "", + }, + }, + { + name: "invalid format - missing parts", + input: "d1:123456:epoch3", + expectError: 
true, + }, + { + name: "invalid offset", + input: "d1:notanumber:epoch4:4:test:5:hello", + expectError: true, + }, + { + name: "invalid prev payload length", + input: "d1:12:epoch4:invalid:test:5:hello", + expectError: true, + }, + { + name: "invalid payload length", + input: "d1:12:epoch4:4:test:invalid:hello", + expectError: true, + }, + { + name: "invalid format no payload", + input: "d1:12:epoch4:4:test:5:", + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result, err := parseDeltaPush(tc.input) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.expectedResult, result) + } + }) + } +} diff --git a/client.go b/client.go index 17fbf33b..2571b341 100644 --- a/client.go +++ b/client.go @@ -2,9 +2,11 @@ package centrifuge import ( "context" + "encoding/base64" "errors" "fmt" "io" + "slices" "sync" "time" @@ -15,6 +17,7 @@ import ( "github.com/centrifugal/protocol" "github.com/google/uuid" "github.com/segmentio/encoding/json" + fdelta "github.com/shadowspore/fossil-delta" ) // Empty Replies/Pushes for pings. @@ -48,6 +51,7 @@ type clientEventHub struct { presenceStatsHandler PresenceStatsHandler historyHandler HistoryHandler stateSnapshotHandler StateSnapshotHandler + cacheEmptyHandler CacheEmptyHandler } // OnAlive allows setting AliveHandler. @@ -104,6 +108,14 @@ func (c *Client) OnPublish(h PublishHandler) { c.eventHub.publishHandler = h } +// OnCacheEmpty allows setting CacheEmptyHandler. +// CacheEmptyHandler called when client subscribes on a channel with RecoveryModeCache but there is no +// cached value in channel. In response to this handler it's possible to tell Centrifuge what to do with +// subscribe request – keep it, or return error. +func (c *Client) OnCacheEmpty(h CacheEmptyHandler) { + c.eventHub.cacheEmptyHandler = h +} + // OnPresence allows setting PresenceHandler. // PresenceHandler called when Presence request from client received. 
// At this moment you can only return a custom error or disconnect client. @@ -1598,6 +1610,20 @@ func (c *Client) handleSubscribe(req *protocol.SubscribeRequest, cmd *protocol.C return ErrorNotAvailable } + if req.Channel == "" { + return c.logDisconnectBadRequest("channel required for subscribe") + } + + if req.Delta != "" { + dt, ok := stringToDeltaType[req.Delta] + if !ok { + return c.logDisconnectBadRequest("unknown delta type in subscribe request: " + req.Delta) + } + if !slices.Contains(c.node.config.AllowedDeltaTypes, dt) { + return c.logDisconnectBadRequest("disabled delta type in subscribe request: " + req.Delta) + } + } + replyError, disconnect := c.validateSubscribeRequest(req) if disconnect != nil || replyError != nil { if disconnect != nil { @@ -2647,7 +2673,7 @@ type subscribeContext struct { channelContext ChannelContext } -func isRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) { +func isStreamRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) { latestOffset := historyResult.Offset latestEpoch := historyResult.Epoch @@ -2670,7 +2696,7 @@ func isRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) return recoveredPubs, recovered } -func isStateRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) { +func isCacheRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) { latestOffset := historyResult.Offset latestEpoch := historyResult.Epoch var recovered bool @@ -2732,7 +2758,13 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep c.pubSubSync.StartBuffering(channel) } - err := c.node.addSubscription(channel, subInfo{client: c, deltaType: deltaTypeFossil}) + sub := subInfo{client: c, deltaType: ""} + if req.Delta != "" { + if dt, deltaFound := stringToDeltaType[req.Delta]; deltaFound { + 
sub.deltaType = dt + } + } + err := c.node.addSubscription(channel, sub) if err != nil { c.node.logger.log(newLogEntry(LogLevelError, "error adding subscription", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) c.pubSubSync.StopBuffering(channel) @@ -2760,6 +2792,16 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep ) if reply.Options.EnablePositioning || reply.Options.EnableRecovery { + handleErr := func(err error) subscribeContext { + c.pubSubSync.StopBuffering(channel) + var clientErr *Error + if errors.As(err, &clientErr) && !errors.Is(clientErr, ErrorInternal) { + return errorDisconnectContext(clientErr, nil) + } + ctx.disconnect = &DisconnectServerError + return ctx + } + res.Positioned = true if reply.Options.EnableRecovery { res.Recoverable = true @@ -2770,29 +2812,41 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep cmdEpoch := req.Epoch recoveryMode := reply.Options.RecoveryMode - if recoveryMode == RecoveryModeDocument { - historyResult, err := c.node.History(channel, WithHistoryFilter(HistoryFilter{ - Limit: 1, - Reverse: true, - }), WithHistoryMetaTTL(reply.Options.HistoryMetaTTL)) + // Client provided subscribe request with recover flag on. Try to recover missed + // publications automatically from history (we assume here that the history configured wisely). 
+ + if recoveryMode == RecoveryModeCache { + historyResult, err := c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) if err != nil { - c.node.logger.log(newLogEntry(LogLevelError, "error on recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) - c.pubSubSync.StopBuffering(channel) - if clientErr, ok := err.(*Error); ok && clientErr != ErrorInternal { - return errorDisconnectContext(clientErr, nil) - } - ctx.disconnect = &DisconnectServerError - return ctx + c.node.logger.log(newLogEntry(LogLevelError, "error on cache recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) + return handleErr(err) } latestOffset = historyResult.Offset latestEpoch = historyResult.Epoch var recovered bool - recoveredPubs, recovered = isStateRecovered(historyResult, cmdOffset, cmdEpoch) + recoveredPubs, recovered = isCacheRecovered(historyResult, cmdOffset, cmdEpoch) res.Recovered = recovered - c.node.metrics.incRecover(res.Recovered) + if len(historyResult.Publications) == 0 && c.eventHub.cacheEmptyHandler != nil { + cacheReply := c.eventHub.cacheEmptyHandler(CacheEmptyEvent{Channel: channel}) + if cacheReply.Populated && !recovered { + // One more chance to recover in case we know cache was populated. 
+ historyResult, err = c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) + if err != nil { + c.node.logger.log(newLogEntry(LogLevelError, "error on populated cache recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) + return handleErr(err) + } + latestOffset = historyResult.Offset + latestEpoch = historyResult.Epoch + recoveredPubs, recovered = isCacheRecovered(historyResult, cmdOffset, cmdEpoch) + res.Recovered = recovered + c.node.metrics.incRecover(res.Recovered) + } else { + c.node.metrics.incRecover(res.Recovered) + } + } else { + c.node.metrics.incRecover(res.Recovered) + } } else { - // Client provided subscribe request with recover flag on. Try to recover missed - // publications automatically from history (we suppose here that history configured wisely). historyResult, err := c.node.recoverHistory(channel, StreamPosition{Offset: cmdOffset, Epoch: cmdEpoch}, reply.Options.HistoryMetaTTL) if err != nil { if errors.Is(err, ErrorUnrecoverablePosition) { @@ -2804,18 +2858,13 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep c.node.metrics.incRecover(res.Recovered) } else { c.node.logger.log(newLogEntry(LogLevelError, "error on recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) - c.pubSubSync.StopBuffering(channel) - if clientErr, ok := err.(*Error); ok && clientErr != ErrorInternal { - return errorDisconnectContext(clientErr, nil) - } - ctx.disconnect = &DisconnectServerError - return ctx + return handleErr(err) } } else { latestOffset = historyResult.Offset latestEpoch = historyResult.Epoch var recovered bool - recoveredPubs, recovered = isRecovered(historyResult, cmdOffset, cmdEpoch) + recoveredPubs, recovered = isStreamRecovered(historyResult, cmdOffset, cmdEpoch) res.Recovered = recovered c.node.metrics.incRecover(res.Recovered) } @@ -2824,12 +2873,7 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, 
reply SubscribeRep streamTop, err := c.node.streamTop(channel, reply.Options.HistoryMetaTTL) if err != nil { c.node.logger.log(newLogEntry(LogLevelError, "error getting stream state for channel", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) - c.pubSubSync.StopBuffering(channel) - if clientErr, ok := err.(*Error); ok && clientErr != ErrorInternal { - return errorDisconnectContext(clientErr, nil) - } - ctx.disconnect = &DisconnectServerError - return ctx + return handleErr(err) } latestOffset = streamTop.Offset latestEpoch = streamTop.Epoch @@ -2846,6 +2890,11 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep ctx.disconnect = &DisconnectInsufficientState return ctx } + if reply.Options.RecoveryMode == RecoveryModeCache && len(recoveredPubs) > 1 && req.Delta == "" { + // In RecoveryModeCache case client is only interested in last message. So if delta encoding is + // not used then we can only send the last publication. + recoveredPubs = recoveredPubs[len(recoveredPubs)-1:] + } } if len(recoveredPubs) > 0 { @@ -2859,9 +2908,18 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep } } + var channelFlags uint8 + if res.Recovered { // Only append recovered publications in case continuity in a channel can be achieved. - res.Publications = recoveredPubs + if req.Delta == string(DeltaTypeFossil) { + res.Publications = c.makeRecoveredPubsDeltaFossil(recoveredPubs) + // Allow delta for the following real-time publications since recovery is successful + // and makeRecoveredPubsDeltaFossil already created publication with base data if required. + channelFlags |= flagDeltaAllowed + } else { + res.Publications = recoveredPubs + } // In case of successful recovery attach stream offset from request to subscribe response. 
// This simplifies client implementation as it doesn't need to distinguish between cases when // subscribe response has recovered publications, or it has no recovered publications. @@ -2890,7 +2948,6 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep defer c.handleCommandFinished(cmd, protocol.FrameTypeSubscribe, nil, protoReply, started) } - var channelFlags uint8 channelFlags |= flagSubscribed if serverSide { channelFlags |= flagServerSide @@ -2946,6 +3003,55 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep return ctx } +func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publication) []*protocol.Publication { + if len(recoveredPubs) == 0 { + return nil + } + prevPub := recoveredPubs[0] + if c.transport.Protocol() == ProtocolTypeJSON { + // For JSON case we need to use b64 for data. + pub := &protocol.Publication{ + Offset: prevPub.Offset, + Info: prevPub.Info, + Tags: prevPub.Tags, + Data: nil, + B64Data: base64.StdEncoding.EncodeToString(prevPub.Data), + Delta: false, + } + recoveredPubs[0] = pub + } + // Probably during recovery we should not make deltas? This is something to investigate, in + // RecoveryModeCache case this won't be used since there is only one publication max recovered. 
+ if len(recoveredPubs) > 1 { + for i, pub := range recoveredPubs[1:] { + patch := fdelta.Create(prevPub.Data, pub.Data) + var deltaPub *protocol.Publication + if c.transport.Protocol() == ProtocolTypeJSON { + b64patch := base64.StdEncoding.EncodeToString(patch) + deltaPub = &protocol.Publication{ + Offset: pub.Offset, + //Data: nil, + Info: pub.Info, + Tags: pub.Tags, + Delta: true, + B64Data: b64patch, + } + } else { + deltaPub = &protocol.Publication{ + Offset: pub.Offset, + Data: patch, + Info: pub.Info, + Tags: pub.Tags, + Delta: true, + } + } + recoveredPubs[i+1] = deltaPub + prevPub = recoveredPubs[i] + } + } + return recoveredPubs +} + func (c *Client) releaseSubscribeCommandReply(reply *protocol.Reply) { protocol.ReplyPool.ReleaseSubscribeReply(reply) } diff --git a/client_test.go b/client_test.go index 07ecfc27..c881cc7e 100644 --- a/client_test.go +++ b/client_test.go @@ -678,7 +678,7 @@ func testUnexpectedOffsetEpochProtocolV2(t *testing.T, offset uint64, epoch stri err = node.handlePublication("test", &Publication{ Offset: offset, - }, StreamPosition{offset, epoch}) + }, StreamPosition{offset, epoch}, false, nil) require.NoError(t, err) select { @@ -1503,7 +1503,7 @@ func TestClientPublishNotAvailable(t *testing.T) { type testBrokerEventHandler struct { // Publication must register callback func to handle Publications received. - HandlePublicationFunc func(ch string, pub *Publication, sp StreamPosition) error + HandlePublicationFunc func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error // Join must register callback func to handle Join messages received. HandleJoinFunc func(ch string, info *ClientInfo) error // Leave must register callback func to handle Leave messages received. 
@@ -1512,9 +1512,9 @@ type testBrokerEventHandler struct { HandleControlFunc func([]byte) error } -func (b *testBrokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition) error { +func (b *testBrokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { if b.HandlePublicationFunc != nil { - return b.HandlePublicationFunc(ch, pub, sp) + return b.HandlePublicationFunc(ch, pub, sp, delta, prevPub) } return nil } @@ -1560,7 +1560,7 @@ func TestClientPublishHandler(t *testing.T) { connectClientV2(t, client) node.broker.(*MemoryBroker).eventHandler = &testBrokerEventHandler{ - HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition) error { + HandlePublicationFunc: func(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { var msg testClientMessage err := json.Unmarshal(pub.Data, &msg) require.NoError(t, err) @@ -3080,7 +3080,7 @@ func TestErrLogLevel(t *testing.T) { func errLogLevel(err error) LogLevel { logLevel := LogLevelInfo - if err != ErrorNotAvailable { + if !errors.Is(err, ErrorNotAvailable) { logLevel = LogLevelError } return logLevel diff --git a/config.go b/config.go index 7af2cbbe..812099a6 100644 --- a/config.go +++ b/config.go @@ -108,6 +108,10 @@ type Config struct { // function for extracting channel_namespace label for transport_messages_received and // transport_messages_received_size. ChannelNamespaceLabelForTransportMessagesReceived bool + + // AllowedDeltaTypes is a whitelist of DeltaType subscribers can use. At this point Centrifuge + // only supports DeltaTypeFossil. If not set clients won't be able to negotiate delta encoding. + AllowedDeltaTypes []DeltaType } const ( diff --git a/events.go b/events.go index 7e88b9fb..9843fdbd 100644 --- a/events.go +++ b/events.go @@ -358,6 +358,19 @@ type HistoryHandler func(HistoryEvent, HistoryCallback) // internal state. 
Returning a copy is important to avoid data races. type StateSnapshotHandler func() (any, error) +// CacheEmptyEvent ... +type CacheEmptyEvent struct { + Channel string +} + +// CacheEmptyReply ... +type CacheEmptyReply struct { + Populated bool +} + +// CacheEmptyHandler ... +type CacheEmptyHandler func(CacheEmptyEvent) CacheEmptyReply + // SurveyEvent with Op and Data of survey. type SurveyEvent struct { Op string diff --git a/go.mod b/go.mod index 6f86171c..53623ce9 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/FZambia/eagle v0.1.0 - github.com/centrifugal/protocol v0.12.1-0.20240324055340-a826f3aa7e88 + github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26 github.com/evanphx/json-patch/v5 v5.9.0 github.com/google/uuid v1.6.0 github.com/prometheus/client_golang v1.19.0 diff --git a/go.sum b/go.sum index 9d109413..f4dc0c81 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/FZambia/eagle v0.1.0 h1:9gyX6x+xjoIfglgyPTcYm7dvY7FJ93us1QY5De4CyXA= github.com/FZambia/eagle v0.1.0/go.mod h1:YjGSPVkQTNcVLfzEUQJNgW9ScPR0K4u/Ky0yeFa4oDA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/centrifugal/protocol v0.12.1-0.20240324055340-a826f3aa7e88 h1:NTbCvqhbU58RYY7+cJtlZjSRos4XpHLTQAPuWAK9NiY= -github.com/centrifugal/protocol v0.12.1-0.20240324055340-a826f3aa7e88/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak= +github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26 h1:665ZHIkTLdwl0eOrD1suFZzd5bmDTRTVr4i5RZtrUog= +github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26/go.mod h1:5Z0SuNdXEt83Fkoi34BCyY23p1P8+zQakQS6/BfJHak= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/hub.go b/hub.go index 83bd412b..9196278a 100644 --- a/hub.go +++ b/hub.go @@ -3,12 +3,10 @@ package centrifuge import ( "context" "encoding/base64" - "fmt" "io" "sync" "github.com/centrifugal/protocol" - jsonpatch "github.com/evanphx/json-patch/v5" fdelta "github.com/shadowspore/fossil-delta" ) @@ -140,15 +138,8 @@ func (h *Hub) BroadcastPublication(ch string, pub *Publication, sp StreamPositio return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, pubToProto(pub), sp) } -type broadcastData struct { - pub *Publication - prevPub *Publication - jsonPatchPub *Publication - fossilPub *Publication -} - -func (h *Hub) broadcastData(ch string, bd *broadcastData, sp StreamPosition) error { - return h.subShards[index(ch, numHubShards)].broadcastData(ch, bd, sp) +func (h *Hub) broadcastPublicationDelta(ch string, pub *Publication, prevPub *Publication, sp StreamPosition) error { + return h.subShards[index(ch, numHubShards)].broadcastPublicationDelta(ch, pub, prevPub, sp) } // broadcastJoin sends message to all clients subscribed on channel. @@ -485,14 +476,17 @@ func (h *connShard) NumUsers() int { return len(h.users) } -type DeltaType int +type DeltaType string const ( - deltaTypeDisabled DeltaType = iota - deltaTypeJsonPatch - deltaTypeFossil + // DeltaTypeFossil is Fossil delta encoding. See https://fossil-scm.org/home/doc/tip/www/delta_encoder_algorithm.wiki. + DeltaTypeFossil DeltaType = "fossil" ) +var stringToDeltaType = map[string]DeltaType{ + "fossil": DeltaTypeFossil, +} + type subInfo struct { client *Client deltaType DeltaType @@ -574,9 +568,9 @@ type dataValue struct { deltaData []byte } -// broadcastPublication sends message to all clients subscribed on channel. -func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPosition) error { - pub := pubToProto(bd.pub) +// broadcastPublicationDelta sends message to all clients subscribed on channel trying to use deltas. 
+func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, prevPub *Publication, sp StreamPosition) error { + fullPub := pubToProto(pub) dataByKey := make(map[broadcastKey]dataValue) @@ -593,57 +587,43 @@ func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPos ) for _, sub := range channelSubscribers { - protoType := sub.client.Transport().Protocol().toProto() - isUnidirectional := sub.client.transport.Unidirectional() - deltaType := sub.deltaType - key := broadcastKey{ProtocolType: protoType, Unidirectional: isUnidirectional, DeltaType: deltaType} - value, dataFound := dataByKey[key] - if !dataFound { - deltaPub := pub - if bd.prevPub != nil && key.DeltaType == deltaTypeJsonPatch { - // Generate the patch required to turn originalJSON into modifiedJSON - patch, err := jsonpatch.CreateMergePatch(bd.prevPub.Data, pub.Data) - if err != nil { - jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} - continue - } - deltaPub = &protocol.Publication{ - Offset: pub.Offset, - Data: patch, - Info: pub.Info, - Tags: pub.Tags, - Delta: true, - } - } else if bd.prevPub != nil && key.DeltaType == deltaTypeFossil { - patch := fdelta.Create(bd.prevPub.Data, pub.Data) - fmt.Println(string(patch)) + key := broadcastKey{ + ProtocolType: sub.client.Transport().Protocol().toProto(), + Unidirectional: sub.client.transport.Unidirectional(), + DeltaType: sub.deltaType, + } + value, valueFound := dataByKey[key] + if !valueFound { + deltaPub := fullPub + if prevPub != nil && key.DeltaType == DeltaTypeFossil { + patch := fdelta.Create(prevPub.Data, fullPub.Data) if key.ProtocolType == protocol.TypeJSON { b64patch := base64.StdEncoding.EncodeToString(patch) deltaPub = &protocol.Publication{ - Offset: pub.Offset, + Offset: fullPub.Offset, //Data: nil, - Info: pub.Info, - Tags: pub.Tags, + Info: fullPub.Info, + Tags: fullPub.Tags, Delta: true, B64Data: b64patch, } } else { deltaPub = &protocol.Publication{ - 
Offset: pub.Offset, + Offset: fullPub.Offset, Data: patch, - Info: pub.Info, - Tags: pub.Tags, + Info: fullPub.Info, + Tags: fullPub.Tags, Delta: true, } } - } else if bd.prevPub == nil && key.ProtocolType == protocol.TypeJSON && key.DeltaType == deltaTypeFossil { + } else if prevPub == nil && key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { // In JSON and Fossil case we need to send full state in base64 format. - b64data := base64.StdEncoding.EncodeToString(pub.Data) + b64data := base64.StdEncoding.EncodeToString(fullPub.Data) deltaPub = &protocol.Publication{ - Offset: pub.Offset, + Offset: fullPub.Offset, //Data: nil, - Info: pub.Info, - Tags: pub.Tags, + Info: fullPub.Info, + Tags: fullPub.Tags, B64Data: b64data, } } @@ -653,14 +633,14 @@ func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPos if key.ProtocolType == protocol.TypeJSON { if sub.client.transport.Unidirectional() { - pubToUse := pub - if key.ProtocolType == protocol.TypeJSON && key.DeltaType == deltaTypeFossil { + pubToUse := fullPub + if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { pubToUse = &protocol.Publication{ - Offset: pub.Offset, + Offset: fullPub.Offset, //Data: nil, - Info: pub.Info, - Tags: pub.Tags, - B64Data: base64.StdEncoding.EncodeToString(pub.Data), + Info: fullPub.Info, + Tags: fullPub.Tags, + B64Data: base64.StdEncoding.EncodeToString(fullPub.Data), } } push := &protocol.Push{Channel: channel, Pub: pubToUse} @@ -670,14 +650,14 @@ func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPos jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} } } else { - pubToUse := pub - if key.ProtocolType == protocol.TypeJSON && key.DeltaType == deltaTypeFossil { + pubToUse := fullPub + if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { pubToUse = &protocol.Publication{ - Offset: pub.Offset, + Offset: fullPub.Offset, //Data: 
nil, - Info: pub.Info, - Tags: pub.Tags, - B64Data: base64.StdEncoding.EncodeToString(pub.Data), + Info: fullPub.Info, + Tags: fullPub.Tags, + B64Data: base64.StdEncoding.EncodeToString(fullPub.Data), } } push := &protocol.Push{Channel: channel, Pub: pubToUse} @@ -687,16 +667,16 @@ func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPos jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} } } - } else if protoType == protocol.TypeProtobuf { + } else if key.ProtocolType == protocol.TypeProtobuf { if sub.client.transport.Unidirectional() { - push := &protocol.Push{Channel: channel, Pub: pub} + push := &protocol.Push{Channel: channel, Pub: fullPub} var err error data, err = protocol.DefaultProtobufPushEncoder.Encode(push) if err != nil { return err } } else { - push := &protocol.Push{Channel: channel, Pub: pub} + push := &protocol.Push{Channel: channel, Pub: fullPub} var err error data, err = protocol.DefaultProtobufReplyEncoder.Encode(&protocol.Reply{Push: push}) if err != nil { @@ -721,7 +701,7 @@ func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPos jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} } } - } else if protoType == protocol.TypeProtobuf { + } else if key.ProtocolType == protocol.TypeProtobuf { if sub.client.transport.Unidirectional() { push := &protocol.Push{Channel: channel, Pub: deltaPub} var err error @@ -746,7 +726,7 @@ func (h *subShard) broadcastData(channel string, bd *broadcastData, sp StreamPos go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client) continue } - _ = sub.client.writePublication(channel, pub, value, sp) + _ = sub.client.writePublication(channel, fullPub, value, sp) } if jsonEncodeErr != nil && h.logger.enabled(LogLevelWarn) { // Log that we had clients with inappropriate protocol, and point to the first such client. 
diff --git a/hub_test.go b/hub_test.go index d3b4db4c..81119c57 100644 --- a/hub_test.go +++ b/hub_test.go @@ -621,8 +621,8 @@ func TestHubSubscriptions(t *testing.T) { c, err := newClient(context.Background(), defaultTestNode(), newTestTransport(func() {})) require.NoError(t, err) - _, _ = h.addSub("test1", c) - _, _ = h.addSub("test2", c) + _, _ = h.addSub("test1", subInfo{client: c, deltaType: ""}) + _, _ = h.addSub("test2", subInfo{client: c, deltaType: ""}) require.Equal(t, 2, h.NumChannels()) require.Contains(t, h.Channels(), "test1") require.Contains(t, h.Channels(), "test2") @@ -687,7 +687,7 @@ func TestHubSharding(t *testing.T) { require.NoError(t, err) _ = n.hub.add(c) for _, ch := range channels { - _, _ = n.hub.addSub(ch, c) + _, _ = n.hub.addSub(ch, subInfo{client: c, deltaType: ""}) } } } @@ -726,7 +726,7 @@ func BenchmarkHub_Contention(b *testing.B) { _ = n.hub.add(c) clients = append(clients, c) for _, ch := range channels { - _, _ = n.hub.addSub(ch, c) + _, _ = n.hub.addSub(ch, subInfo{client: c, deltaType: ""}) } } @@ -746,7 +746,7 @@ func BenchmarkHub_Contention(b *testing.B) { defer wg.Done() _ = n.hub.BroadcastPublication(channels[(i+numChannels/2)%numChannels], pub, streamPosition) }() - _, _ = n.hub.addSub(channels[i%numChannels], clients[i%numClients]) + _, _ = n.hub.addSub(channels[i%numChannels], subInfo{client: clients[i%numClients], deltaType: ""}) wg.Wait() } }) @@ -787,7 +787,7 @@ func BenchmarkHub_MassiveBroadcast(b *testing.B) { c := newTestConnectedClientWithTransport(b, context.Background(), n, t, "12") _ = n.hub.add(c) for _, ch := range channels { - _, _ = n.hub.addSub(ch, c) + _, _ = n.hub.addSub(ch, subInfo{client: c, deltaType: ""}) } } diff --git a/internal/redis_lua/broker_history_add_list.lua b/internal/redis_lua/broker_history_add_list.lua index 0e45b254..1ffbc646 100644 --- a/internal/redis_lua/broker_history_add_list.lua +++ b/internal/redis_lua/broker_history_add_list.lua @@ -9,6 +9,7 @@ local meta_expire = ARGV[5] 
local new_epoch_if_empty = ARGV[6] local publish_command = ARGV[7] local result_key_expire = ARGV[8] +local use_delta = ARGV[9] if result_key_expire ~= '' then local cached_result = redis.call("hmget", result_key, "e", "s") @@ -30,12 +31,18 @@ if meta_expire ~= '0' then redis.call("expire", meta_key, meta_expire) end +local prev_message_payload = redis.call("lindex", list_key, 0) or "" + local payload = "__" .. "p1:" .. top_offset .. ":" .. current_epoch .. "__" .. message_payload redis.call("lpush", list_key, payload) redis.call("ltrim", list_key, 0, ltrim_right_bound) redis.call("expire", list_key, list_ttl) if channel ~= '' then + local payload + if use_delta == "1" then + payload = "__" .. "d1:" .. top_offset .. ":" .. current_epoch .. ":" .. #prev_message_payload .. ":" .. prev_message_payload .. ":" .. #message_payload .. ":" .. message_payload + end redis.call(publish_command, channel, payload) end diff --git a/internal/redis_lua/broker_history_add_stream.lua b/internal/redis_lua/broker_history_add_stream.lua index 28251709..ead4af38 100644 --- a/internal/redis_lua/broker_history_add_stream.lua +++ b/internal/redis_lua/broker_history_add_stream.lua @@ -9,6 +9,7 @@ local meta_expire = ARGV[5] local new_epoch_if_empty = ARGV[6] local publish_command = ARGV[7] local result_key_expire = ARGV[8] +local use_delta = ARGV[9] if result_key_expire ~= '' then local cached_result = redis.call("hmget", result_key, "e", "s") @@ -30,11 +31,24 @@ if meta_expire ~= '0' then redis.call("expire", meta_key, meta_expire) end +local prev_message_payload = "" +if use_delta == "1" then + local prev_entries = redis.call("xrevrange", stream_key, "+", "-", "COUNT", 1) + if #prev_entries > 0 then + prev_message_payload = prev_entries[1][2]["d"] + end +end + redis.call("xadd", stream_key, "MAXLEN", stream_size, top_offset, "d", message_payload) redis.call("expire", stream_key, stream_ttl) if channel ~= '' then - local payload = "__" .. "p1:" .. top_offset .. ":" .. current_epoch .. 
"__" .. message_payload + local payload + if use_delta == "1" then + payload = "__" .. "d1:" .. top_offset .. ":" .. current_epoch .. ":" .. #prev_message_payload .. ":" .. prev_message_payload .. ":" .. #message_payload .. ":" .. message_payload + else + payload = "__" .. "p1:" .. top_offset .. ":" .. current_epoch .. "__" .. message_payload + end redis.call(publish_command, channel, payload) end diff --git a/node.go b/node.go index 73b3b6c4..b5615d37 100644 --- a/node.go +++ b/node.go @@ -683,24 +683,21 @@ func (n *Node) handleControl(data []byte) error { // handlePublication handles messages published into channel and // coming from Broker. The goal of method is to deliver this message // to all clients on this node currently subscribed to channel. -func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error { +func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { n.metrics.incMessagesReceived("publication") numSubscribers := n.hub.NumSubscribers(ch) hasCurrentSubscribers := numSubscribers > 0 if !hasCurrentSubscribers { return nil } - //if prevPub != nil { - err := n.hub.broadcastData(ch, &broadcastData{ - pub: pub, - prevPub: prevPub, - }, sp) - if err != nil { - n.Log(newLogEntry(LogLevelError, "error broadcast", map[string]any{"error": err.Error()})) + if delta { + err := n.hub.broadcastPublicationDelta(ch, pub, prevPub, sp) + if err != nil { + n.Log(newLogEntry(LogLevelError, "error broadcast delta", map[string]any{"error": err.Error()})) + } + return err } - return err - //} - //return n.hub.BroadcastPublication(ch, pub, sp) + return n.hub.BroadcastPublication(ch, pub, sp) } // handleJoin handles join messages - i.e. broadcasts it to @@ -1347,6 +1344,15 @@ func (n *Node) recoverHistory(ch string, since StreamPosition, historyMetaTTL ti }), WithHistoryMetaTTL(historyMetaTTL)) } +// recoverCache recovers last publication in channel. 
+func (n *Node) recoverCache(ch string, historyMetaTTL time.Duration) (HistoryResult, error) { + n.metrics.incActionCount("history_recover") + return n.History(ch, WithHistoryFilter(HistoryFilter{ + Limit: 1, + Reverse: true, + }), WithHistoryMetaTTL(historyMetaTTL)) +} + // streamTop returns current stream top StreamPosition for a channel. func (n *Node) streamTop(ch string, historyMetaTTL time.Duration) (StreamPosition, error) { n.metrics.incActionCount("history_stream_top") @@ -1527,11 +1533,11 @@ type brokerEventHandler struct { } // HandlePublication coming from Broker. -func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition, prevPub *Publication) error { +func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { if pub == nil { panic("nil Publication received, this must never happen") } - return h.node.handlePublication(ch, pub, sp, prevPub) + return h.node.handlePublication(ch, pub, sp, delta, prevPub) } // HandleJoin coming from Broker. diff --git a/node_test.go b/node_test.go index 16805e89..2516e69f 100644 --- a/node_test.go +++ b/node_test.go @@ -1170,7 +1170,7 @@ func TestBrokerEventHandler_PanicsOnNil(t *testing.T) { defer func() { _ = node.Shutdown(context.Background()) }() handler := &brokerEventHandler{node: node} require.Panics(t, func() { - _ = handler.HandlePublication("test", nil, StreamPosition{}) + _ = handler.HandlePublication("test", nil, StreamPosition{}, false, nil) }) require.Panics(t, func() { _ = handler.HandleJoin("test", nil) diff --git a/options.go b/options.go index 02346f1a..33537496 100644 --- a/options.go +++ b/options.go @@ -85,7 +85,7 @@ type SubscribeOptions struct { // Make sure you are using EnableRecovery in channels that maintain Publication // history stream. EnableRecovery bool - // RecoveryMode is by default RecoveryModeStream, but can be also RecoveryModeDocument. 
+ // RecoveryMode is by default RecoveryModeStream, but can be also RecoveryModeCache. RecoveryMode RecoveryMode // Data to send to a client with Subscribe Push. Data []byte @@ -160,8 +160,8 @@ func WithRecovery(enabled bool) SubscribeOption { type RecoveryMode int32 const ( - RecoveryModeStream RecoveryMode = 0 - RecoveryModeDocument RecoveryMode = 1 + RecoveryModeStream RecoveryMode = 0 + RecoveryModeCache RecoveryMode = 1 ) // WithRecoveryMode ... From 930c5fc190bafdb243a36d96d29e72158b1db700 Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 31 Mar 2024 20:22:10 +0300 Subject: [PATCH 13/61] up go version --- .github/workflows/ci.yml | 8 ++++---- go.mod | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8deb47b7..2081e47c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,11 +10,11 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: - go-version: '1.20.x' + go-version: '1.22.x' - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.53.3 + version: v1.57.2 args: --timeout 3m0s build: name: Test with Go ${{ matrix.go-version }} Redis ${{ matrix.redis-version }} @@ -23,7 +23,7 @@ jobs: if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository strategy: matrix: - go-version: ["1.20", "1.21"] + go-version: ["1.21", "1.22"] redis-version: [5, 6, 7] steps: - name: Install Go stable version @@ -43,7 +43,7 @@ jobs: run: go test -v -race -tags integration -coverprofile=coverage.out $(go list ./... 
| grep -v /_examples/) - name: Upload code coverage to codecov - if: matrix.go-version == '1.21' + if: matrix.go-version == '1.22' uses: codecov/codecov-action@v4 with: file: ./coverage.out diff --git a/go.mod b/go.mod index 53623ce9..8f061b8b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/centrifugal/centrifuge -go 1.20 +go 1.21 require ( github.com/FZambia/eagle v0.1.0 From fcac0e5de1978877210297967505ca3da0406547 Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 31 Mar 2024 20:26:54 +0300 Subject: [PATCH 14/61] fix list lua --- internal/redis_lua/broker_history_add_list.lua | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/redis_lua/broker_history_add_list.lua b/internal/redis_lua/broker_history_add_list.lua index 1ffbc646..7bd80518 100644 --- a/internal/redis_lua/broker_history_add_list.lua +++ b/internal/redis_lua/broker_history_add_list.lua @@ -39,7 +39,6 @@ redis.call("ltrim", list_key, 0, ltrim_right_bound) redis.call("expire", list_key, list_ttl) if channel ~= '' then - local payload if use_delta == "1" then payload = "__" .. "d1:" .. top_offset .. ":" .. current_epoch .. ":" .. #prev_message_payload .. ":" .. prev_message_payload .. ":" .. #message_payload .. ":" .. 
message_payload end From a7a5c99239be5dea0f84ce1306efd53d330edff3 Mon Sep 17 00:00:00 2001 From: Alexander Emelin Date: Mon, 1 Apr 2024 09:04:20 +0300 Subject: [PATCH 15/61] tidy go mod --- go.mod | 2 -- go.sum | 12 ++++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8f061b8b..8e60add7 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.21 require ( github.com/FZambia/eagle v0.1.0 github.com/centrifugal/protocol v0.12.1-0.20240330085231-151d7a9a3b26 - github.com/evanphx/json-patch/v5 v5.9.0 github.com/google/uuid v1.6.0 github.com/prometheus/client_golang v1.19.0 github.com/redis/rueidis v1.0.31 @@ -22,7 +21,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/pkg/errors v0.8.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect diff --git a/go.sum b/go.sum index f4dc0c81..39b72289 100644 --- a/go.sum +++ b/go.sum @@ -8,20 +8,20 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= @@ -35,6 +35,7 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c github.com/redis/rueidis v1.0.31 h1:S2NlrMB1N+yB+QEKD4o0lV+5GNIeLo/ZMpN42ONcwg0= github.com/redis/rueidis v1.0.31/go.mod h1:g8nPmgR4C68N3abFiOc/gUOSEKw3Tom6/teYMehg4RE= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/encoding v0.4.0 h1:MEBYvRqiUB2nfR2criEXWqwdY6HJOUrCn5hboVOVmy8= @@ -46,14 +47,17 @@ 
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From d491b610a059344843f4390a9afde9ba8afc430a Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 8 Apr 2024 15:31:24 +0300 Subject: [PATCH 16/61] compression playground --- _examples/cache/index.html | 106 ---- _examples/cache/readme.md | 21 - .../compression_playground/apppb/app.pb.go | 475 ++++++++++++++++++ .../compression_playground/apppb/app.proto | 35 ++ .../compression_playground/apppb/generate.sh | 13 + .../{cache => compression_playground}/main.go | 205 +++++--- 
_examples/compression_playground/readme.md | 28 ++ .../compression_playground/static/app.css | 48 ++ .../compression_playground/static/app.proto | 35 ++ .../templates/index.html | 53 ++ .../templates/json.html | 93 ++++ .../templates/protobuf.html | 101 ++++ _examples/go.mod | 2 +- 13 files changed, 1006 insertions(+), 209 deletions(-) delete mode 100644 _examples/cache/index.html delete mode 100644 _examples/cache/readme.md create mode 100644 _examples/compression_playground/apppb/app.pb.go create mode 100644 _examples/compression_playground/apppb/app.proto create mode 100755 _examples/compression_playground/apppb/generate.sh rename _examples/{cache => compression_playground}/main.go (60%) create mode 100644 _examples/compression_playground/readme.md create mode 100644 _examples/compression_playground/static/app.css create mode 100644 _examples/compression_playground/static/app.proto create mode 100644 _examples/compression_playground/templates/index.html create mode 100644 _examples/compression_playground/templates/json.html create mode 100644 _examples/compression_playground/templates/protobuf.html diff --git a/_examples/cache/index.html b/_examples/cache/index.html deleted file mode 100644 index 19184e75..00000000 --- a/_examples/cache/index.html +++ /dev/null @@ -1,106 +0,0 @@ - - - - - - - - - - -
- -
- - diff --git a/_examples/cache/readme.md b/_examples/cache/readme.md deleted file mode 100644 index ba76d9c0..00000000 --- a/_examples/cache/readme.md +++ /dev/null @@ -1,21 +0,0 @@ -Results with different configurations for total data sent over the interface from server to client, -caught with WireShark filter: - -``` -tcp.srcport == 8000 && websocket -``` - -| Protocol | Compression | Delta | Bytes sent | Percentage | -|--------------------|-------------|-----------|------------|------------| -| JSON over JSON | No | No | 29510 | 100.0 | -| JSON over JSON | Yes | No | 11135 | 37.73 | -| JSON over JSON | No | Yes | 6435 | 21.81 | -| JSON over JSON | Yes | Yes | 4963 | 16.82 | -| JSON over Protobuf | No | No | 28589 | 96.88 | -| JSON over Protobuf | Yes | No | 11133 | 37.73 | -| JSON over Protobuf | No | Yes | 4276 | 14.49 | -| JSON over Protobuf | Yes | Yes | 3454 | 11.70 | - -Note: since we send JSON over Protobuf, the JSON size is the same as the JSON over JSON case. -In this case Centrifugal protocol gives lower overhead, but the main part comes from the JSON payload size. -Another advantage of JSON over Protobuf is that we are not forced to use base64 encoding for delta case. diff --git a/_examples/compression_playground/apppb/app.pb.go b/_examples/compression_playground/apppb/app.pb.go new file mode 100644 index 00000000..70f98cb0 --- /dev/null +++ b/_examples/compression_playground/apppb/app.pb.go @@ -0,0 +1,475 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: app.proto + +package apppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EventType int32 + +const ( + EventType_UNKNOWN EventType = 0 // Default value, should not be used + EventType_GOAL EventType = 1 + EventType_YELLOW_CARD EventType = 2 + EventType_RED_CARD EventType = 3 + EventType_SUBSTITUTE EventType = 4 +) + +// Enum value maps for EventType. +var ( + EventType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "GOAL", + 2: "YELLOW_CARD", + 3: "RED_CARD", + 4: "SUBSTITUTE", + } + EventType_value = map[string]int32{ + "UNKNOWN": 0, + "GOAL": 1, + "YELLOW_CARD": 2, + "RED_CARD": 3, + "SUBSTITUTE": 4, + } +) + +func (x EventType) Enum() *EventType { + p := new(EventType) + *p = x + return p +} + +func (x EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EventType) Descriptor() protoreflect.EnumDescriptor { + return file_app_proto_enumTypes[0].Descriptor() +} + +func (EventType) Type() protoreflect.EnumType { + return &file_app_proto_enumTypes[0] +} + +func (x EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EventType.Descriptor instead. 
+func (EventType) EnumDescriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{0} +} + +type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type EventType `protobuf:"varint,1,opt,name=type,proto3,enum=centrifugal.centrifuge.examples.compression_playground.EventType" json:"type,omitempty"` + Minute int32 `protobuf:"varint,2,opt,name=minute,proto3" json:"minute,omitempty"` +} + +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_app_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. 
+func (*Event) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{0} +} + +func (x *Event) GetType() EventType { + if x != nil { + return x.Type + } + return EventType_UNKNOWN +} + +func (x *Event) GetMinute() int32 { + if x != nil { + return x.Minute + } + return 0 +} + +type Player struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Events []*Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` +} + +func (x *Player) Reset() { + *x = Player{} + if protoimpl.UnsafeEnabled { + mi := &file_app_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Player) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Player) ProtoMessage() {} + +func (x *Player) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Player.ProtoReflect.Descriptor instead. 
+func (*Player) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{1} +} + +func (x *Player) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Player) GetEvents() []*Event { + if x != nil { + return x.Events + } + return nil +} + +type Team struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Score int32 `protobuf:"varint,2,opt,name=score,proto3" json:"score,omitempty"` + Players []*Player `protobuf:"bytes,3,rep,name=players,proto3" json:"players,omitempty"` +} + +func (x *Team) Reset() { + *x = Team{} + if protoimpl.UnsafeEnabled { + mi := &file_app_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Team) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Team) ProtoMessage() {} + +func (x *Team) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Team.ProtoReflect.Descriptor instead. 
+func (*Team) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{2} +} + +func (x *Team) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Team) GetScore() int32 { + if x != nil { + return x.Score + } + return 0 +} + +func (x *Team) GetPlayers() []*Player { + if x != nil { + return x.Players + } + return nil +} + +type Match struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + HomeTeam *Team `protobuf:"bytes,2,opt,name=home_team,json=homeTeam,proto3" json:"home_team,omitempty"` + AwayTeam *Team `protobuf:"bytes,3,opt,name=away_team,json=awayTeam,proto3" json:"away_team,omitempty"` +} + +func (x *Match) Reset() { + *x = Match{} + if protoimpl.UnsafeEnabled { + mi := &file_app_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Match) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Match) ProtoMessage() {} + +func (x *Match) ProtoReflect() protoreflect.Message { + mi := &file_app_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Match.ProtoReflect.Descriptor instead. 
+func (*Match) Descriptor() ([]byte, []int) { + return file_app_proto_rawDescGZIP(), []int{3} +} + +func (x *Match) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Match) GetHomeTeam() *Team { + if x != nil { + return x.HomeTeam + } + return nil +} + +func (x *Match) GetAwayTeam() *Team { + if x != nil { + return x.AwayTeam + } + return nil +} + +var File_app_proto protoreflect.FileDescriptor + +var file_app_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x61, 0x70, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x36, 0x63, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x66, 0x75, 0x67, 0x61, 0x6c, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, + 0x75, 0x67, 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x67, 0x72, 0x6f, + 0x75, 0x6e, 0x64, 0x22, 0x76, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x55, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x63, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x66, 0x75, 0x67, 0x61, 0x6c, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, + 0x75, 0x67, 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x67, 0x72, 0x6f, + 0x75, 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x22, 0x73, 0x0a, 0x06, 0x50, + 0x6c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x63, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x66, 0x75, 0x67, 0x61, 0x6c, 0x2e, 0x63, 0x65, 
0x6e, 0x74, 0x72, 0x69, 0x66, 0x75, + 0x67, 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x67, 0x72, 0x6f, 0x75, + 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x8a, 0x01, 0x0a, 0x04, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x12, 0x58, 0x0a, 0x07, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, 0x75, 0x67, + 0x61, 0x6c, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, 0x75, 0x67, 0x65, 0x2e, 0x65, 0x78, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2e, 0x50, 0x6c, + 0x61, 0x79, 0x65, 0x72, 0x52, 0x07, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x22, 0xcd, 0x01, + 0x0a, 0x05, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x09, 0x68, 0x6f, 0x6d, 0x65, 0x5f, + 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x66, 0x75, 0x67, 0x61, 0x6c, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, + 0x75, 0x67, 0x65, 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x67, 0x72, 0x6f, + 0x75, 0x6e, 0x64, 0x2e, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x08, 0x68, 0x6f, 0x6d, 0x65, 0x54, 0x65, + 0x61, 0x6d, 0x12, 0x59, 0x0a, 0x09, 0x61, 0x77, 0x61, 0x79, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, + 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, 0x75, + 0x67, 0x61, 0x6c, 0x2e, 0x63, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x66, 0x75, 0x67, 0x65, 0x2e, 0x65, + 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x2e, 0x54, + 0x65, 0x61, 0x6d, 0x52, 0x08, 0x61, 0x77, 0x61, 0x79, 0x54, 0x65, 0x61, 0x6d, 0x2a, 0x51, 0x0a, + 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x4f, 0x41, 0x4c, 0x10, + 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x59, 0x45, 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x43, 0x41, 0x52, 0x44, + 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x44, 0x5f, 0x43, 0x41, 0x52, 0x44, 0x10, 0x03, + 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x55, 0x42, 0x53, 0x54, 0x49, 0x54, 0x55, 0x54, 0x45, 0x10, 0x04, + 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x3b, 0x61, 0x70, 0x70, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_app_proto_rawDescOnce sync.Once + file_app_proto_rawDescData = file_app_proto_rawDesc +) + +func file_app_proto_rawDescGZIP() []byte { + file_app_proto_rawDescOnce.Do(func() { + file_app_proto_rawDescData = protoimpl.X.CompressGZIP(file_app_proto_rawDescData) + }) + return file_app_proto_rawDescData +} + +var file_app_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_app_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_app_proto_goTypes = []interface{}{ + (EventType)(0), // 0: centrifugal.centrifuge.examples.compression_playground.EventType + (*Event)(nil), // 1: centrifugal.centrifuge.examples.compression_playground.Event + (*Player)(nil), // 2: centrifugal.centrifuge.examples.compression_playground.Player + (*Team)(nil), // 3: centrifugal.centrifuge.examples.compression_playground.Team + (*Match)(nil), // 4: 
centrifugal.centrifuge.examples.compression_playground.Match +} +var file_app_proto_depIdxs = []int32{ + 0, // 0: centrifugal.centrifuge.examples.compression_playground.Event.type:type_name -> centrifugal.centrifuge.examples.compression_playground.EventType + 1, // 1: centrifugal.centrifuge.examples.compression_playground.Player.events:type_name -> centrifugal.centrifuge.examples.compression_playground.Event + 2, // 2: centrifugal.centrifuge.examples.compression_playground.Team.players:type_name -> centrifugal.centrifuge.examples.compression_playground.Player + 3, // 3: centrifugal.centrifuge.examples.compression_playground.Match.home_team:type_name -> centrifugal.centrifuge.examples.compression_playground.Team + 3, // 4: centrifugal.centrifuge.examples.compression_playground.Match.away_team:type_name -> centrifugal.centrifuge.examples.compression_playground.Team + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_app_proto_init() } +func file_app_proto_init() { + if File_app_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_app_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_app_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Player); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_app_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Team); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } 
+ } + file_app_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Match); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_app_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_app_proto_goTypes, + DependencyIndexes: file_app_proto_depIdxs, + EnumInfos: file_app_proto_enumTypes, + MessageInfos: file_app_proto_msgTypes, + }.Build() + File_app_proto = out.File + file_app_proto_rawDesc = nil + file_app_proto_goTypes = nil + file_app_proto_depIdxs = nil +} diff --git a/_examples/compression_playground/apppb/app.proto b/_examples/compression_playground/apppb/app.proto new file mode 100644 index 00000000..44a697e6 --- /dev/null +++ b/_examples/compression_playground/apppb/app.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package centrifugal.centrifuge.examples.compression_playground; + +option go_package = "./;apppb"; + +enum EventType { + UNKNOWN = 0; // Default value, should not be used + GOAL = 1; + YELLOW_CARD = 2; + RED_CARD = 3; + SUBSTITUTE = 4; +} + +message Event { + EventType type = 1; + int32 minute = 2; +} + +message Player { + string name = 1; + repeated Event events = 2; +} + +message Team { + string name = 1; + int32 score = 2; + repeated Player players = 3; +} + +message Match { + int32 id = 1; + Team home_team = 2; + Team away_team = 3; +} diff --git a/_examples/compression_playground/apppb/generate.sh b/_examples/compression_playground/apppb/generate.sh new file mode 100755 index 00000000..5d641f55 --- /dev/null +++ b/_examples/compression_playground/apppb/generate.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# go install google.golang.org/protobuf/cmd/protoc-gen-go@latest +# go install 
google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + +set -e + +DST_DIR=./ + +protoc -I ./ \ + app.proto \ + --go_out=${DST_DIR} \ + --go-grpc_out=${DST_DIR} diff --git a/_examples/cache/main.go b/_examples/compression_playground/main.go similarity index 60% rename from _examples/cache/main.go rename to _examples/compression_playground/main.go index 555970a6..89f8752e 100644 --- a/_examples/cache/main.go +++ b/_examples/compression_playground/main.go @@ -2,59 +2,33 @@ package main import ( "context" - "encoding/json" - "fmt" + "html/template" "log" "math/rand" "net/http" + "strings" "time" - "github.com/centrifugal/centrifuge" -) - -type Event struct { - Type string - Minute int -} - -type Player struct { - Name string - Events []Event -} - -type Team struct { - Name string - Score int - Players [11]Player -} - -type Match struct { - Number int - HomeTeam Team - AwayTeam Team -} + "github.com/centrifugal/centrifuge/_examples/compression_playground/apppb" -// Define event types -const ( - Goal = "goal" - YellowCard = "yellow card" - RedCard = "red card" - Substitute = "substitute" + "github.com/centrifugal/centrifuge" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) -func simulateMatch(ctx context.Context, num int, node *centrifuge.Node) { - // Predefined lists of player names for each team +func simulateMatch(client *centrifuge.Client, num int32, node *centrifuge.Node, useProtobufPayload bool) { + // Predefined lists of player names for each team. 
playerNamesTeamA := []string{"John Doe", "Jane Smith", "Alex Johnson", "Chris Lee", "Pat Kim", "Sam Morgan", "Jamie Brown", "Casey Davis", "Morgan Garcia", "Taylor White", "Jordan Martinez"} playerNamesTeamB := []string{"Robin Wilson", "Drew Taylor", "Jessie Bailey", "Casey Flores", "Jordan Walker", "Charlie Green", "Alex Adams", "Morgan Thompson", "Taylor Clark", "Jordan Hernandez", "Jamie Lewis"} // Example setup - match := &Match{ - Number: num, - HomeTeam: Team{ + match := &apppb.Match{ + Id: num, + HomeTeam: &apppb.Team{ Name: "Real Madrid", Players: assignNamesToPlayers(playerNamesTeamA), }, - AwayTeam: Team{ + AwayTeam: &apppb.Team{ Name: "Barcelona", Players: assignNamesToPlayers(playerNamesTeamB), }, @@ -64,12 +38,12 @@ func simulateMatch(ctx context.Context, num int, node *centrifuge.Node) { totalEvents := 20 // Total number of events to simulate eventInterval := float64(totalSimulationTime) / float64(totalEvents) // Time between events - r := rand.New(rand.NewSource(17)) + r := rand.New(rand.NewSource(27)) for i := 0; i < totalEvents; i++ { // Sleep between events select { - case <-ctx.Done(): + case <-client.Context().Done(): return case <-time.After(time.Duration(eventInterval*1000) * time.Millisecond): } @@ -80,16 +54,33 @@ func simulateMatch(ctx context.Context, num int, node *centrifuge.Node) { team := chooseRandomTeam(r, match) playerIndex := r.Intn(11) // Choose one of the 11 players randomly - event := Event{Type: eventType, Minute: minute} + event := &apppb.Event{Type: eventType, Minute: int32(minute)} team.Players[playerIndex].Events = append(team.Players[playerIndex].Events, event) - if eventType == Goal { + if eventType == apppb.EventType_GOAL { team.Score++ } - data, _ := json.Marshal(match) - _, err := node.Publish( - "match:state:1", data, + var data []byte + var err error + + if useProtobufPayload { + data, err = proto.Marshal(match) + } else { + data, err = protojson.MarshalOptions{ + UseProtoNames: false, + }.Marshal(match) + } + if err 
!= nil { + log.Fatal(err) + + } + ch := "match:js:1" + if useProtobufPayload { + ch = "match:pb:1" + } + _, err = node.Publish( + ch, data, centrifuge.WithDelta(true), centrifuge.WithHistory(10, time.Minute), ) @@ -99,25 +90,26 @@ func simulateMatch(ctx context.Context, num int, node *centrifuge.Node) { } } -func chooseRandomEventType(r *rand.Rand) string { - events := []string{Goal, YellowCard, RedCard, Substitute} +func chooseRandomEventType(r *rand.Rand) apppb.EventType { + events := []apppb.EventType{ + apppb.EventType_GOAL, apppb.EventType_YELLOW_CARD, apppb.EventType_RED_CARD, apppb.EventType_SUBSTITUTE} return events[r.Intn(len(events))] } -func chooseRandomTeam(r *rand.Rand, match *Match) *Team { +func chooseRandomTeam(r *rand.Rand, match *apppb.Match) *apppb.Team { if r.Intn(2) == 0 { - return &match.HomeTeam + return match.HomeTeam } - return &match.AwayTeam + return match.AwayTeam } // Helper function to create players with names from a given list -func assignNamesToPlayers(names []string) [11]Player { - var players [11]Player +func assignNamesToPlayers(names []string) []*apppb.Player { + var players [11]*apppb.Player for i, name := range names { - players[i] = Player{Name: name} + players[i] = &apppb.Player{Name: name} } - return players + return players[:] } func auth(h http.Handler) http.Handler { @@ -130,6 +122,7 @@ func auth(h http.Handler) http.Handler { // anonymous users allowed to connect to your server or not. cred := ¢rifuge.Credentials{ UserID: "", + Info: []byte(r.URL.RawQuery), // This is a hack for the playground. 
} newCtx := centrifuge.SetCredentials(ctx, cred) r = r.WithContext(newCtx) @@ -147,11 +140,23 @@ func main() { LogHandler: func(entry centrifuge.LogEntry) { log.Println(entry.Message, entry.Fields) }, + AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, }) if err != nil { log.Fatal(err) } + node.OnConnecting(func(ctx context.Context, event centrifuge.ConnectEvent) (centrifuge.ConnectReply, error) { + cred, _ := centrifuge.GetCredentials(ctx) + reply := centrifuge.ConnectReply{} + if strings.Contains(string(cred.Info), "delay") { + reply.MaxMessagesInFrame = -1 + reply.WriteDelay = 200 * time.Millisecond + reply.ReplyWithoutQueue = true + } + return reply, nil + }) + // Set ConnectHandler called when client successfully connected to Node. // Your code inside a handler must be synchronized since it will be called // concurrently from different goroutines (belonging to different client @@ -165,23 +170,29 @@ func main() { transportProto := client.Transport().Protocol() log.Printf("client connected via %s (%s)", transportName, transportProto) - //go func() { - // simulateMatch(client.Context(), 0, node) - //}() - - client.OnCacheEmpty(func(event centrifuge.CacheEmptyEvent) centrifuge.CacheEmptyReply { - simulateMatch(context.Background(), 0, node) - //go func() { - // num := 0 - // for { - // - // num++ - // time.Sleep(5 * time.Second) - // } - //}() - fmt.Println("simulated") - return centrifuge.CacheEmptyReply{} - }) + var useProtobufPayload bool + if strings.Contains(string(client.Info()), "protobuf") { + useProtobufPayload = true + } + + go func() { + log.Printf("using protobuf payload: %v", useProtobufPayload) + simulateMatch(client, 0, node, useProtobufPayload) + }() + + //client.OnCacheEmpty(func(event centrifuge.CacheEmptyEvent) centrifuge.CacheEmptyReply { + // simulateMatch(context.Background(), 0, node) + // //go func() { + // // num := 0 + // // for { + // // + // // num++ + // // time.Sleep(5 * time.Second) + // // } + // //}() + // 
fmt.Println("simulated") + // return centrifuge.CacheEmptyReply{Populated: true} + //}) // Set SubscribeHandler to react on every channel subscription attempt // initiated by a client. Here you can theoretically return an error or @@ -215,7 +226,7 @@ func main() { // Set Disconnect handler to react on client disconnect events. client.OnDisconnect(func(e centrifuge.DisconnectEvent) { - log.Print("client disconnected", e.Code, e.Reason) + log.Printf("client disconnected: %d (%s)", e.Code, e.Reason) }) }) @@ -227,19 +238,51 @@ func main() { // Now configure HTTP routes. - // Serve Websocket connections using WebsocketHandler. - wsHandler := centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{ - //Compression: true, - //CompressionMinSize: 1, - //CompressionLevel: 1, - }) - http.Handle("/connection/websocket", auth(wsHandler)) + http.Handle("/connection/websocket/no_compression", auth(centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{}))) - // The second route is for serving index.html file. 
- http.Handle("/", http.FileServer(http.Dir("./"))) + http.Handle("/connection/websocket/with_compression", auth(centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{ + Compression: true, + CompressionMinSize: 1, + CompressionLevel: 1, + }))) + + http.HandleFunc("/", serveIndex) + http.HandleFunc("/json", serveJsonApp) + http.HandleFunc("/protobuf", serveProtobufApp) + + // Serve static files from the /static folder + fs := http.FileServer(http.Dir("static")) + http.Handle("/static/", http.StripPrefix("/static/", fs)) log.Printf("Starting server, visit http://localhost:8000") if err := http.ListenAndServe("127.0.0.1:8000", nil); err != nil { log.Fatal(err) } } + +func serveIndex(w http.ResponseWriter, r *http.Request) { + t, err := template.ParseFiles("templates/index.html") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _ = t.Execute(w, nil) +} + +func serveJsonApp(w http.ResponseWriter, r *http.Request) { + t, err := template.ParseFiles("templates/json.html") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _ = t.Execute(w, nil) +} + +func serveProtobufApp(w http.ResponseWriter, r *http.Request) { + t, err := template.ParseFiles("templates/protobuf.html") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + _ = t.Execute(w, nil) +} diff --git a/_examples/compression_playground/readme.md b/_examples/compression_playground/readme.md new file mode 100644 index 00000000..4c6a83ed --- /dev/null +++ b/_examples/compression_playground/readme.md @@ -0,0 +1,28 @@ +This is a sample simulation of football match where the entire state is sent into WebSocket connection upon every +match event. 
+ +Results with different configurations for total data sent over the interface from server to client, +caught with WireShark filter: + +``` +tcp.srcport == 8000 && websocket +``` + +| Protocol | Compression | Delta | Bytes sent | Percentage | +|--------------------------|-------------|-----------|------------|------------| +| JSON over JSON | No | No | 29510 | 100.0 | +| JSON over JSON | Yes | No | 11135 | 37.73 | +| JSON over JSON | No | Yes | 6435 | 21.81 | +| JSON over JSON | Yes | Yes | 4963 | 16.82 | +| JSON over Protobuf | No | No | 28589 | 96.88 | +| JSON over Protobuf | Yes | No | 11133 | 37.73 | +| JSON over Protobuf | No | Yes | 4276 | 14.49 | +| JSON over Protobuf | Yes | Yes | 3454 | 11.70 | +| Protobuf over Protobuf | No | No | ? | ? | +| Protobuf over Protobuf | Yes | No | ? | ? | +| Protobuf over Protobuf | No | Yes | ? | ? | +| Protobuf over Protobuf | Yes | Yes | ? | ? | + +Note: since we send JSON over Protobuf, the JSON size is the same as the JSON over JSON case. +In this case Centrifugal protocol gives lower overhead, but the main part comes from the JSON payload size. +Another advantage of JSON over Protobuf is that we are not forced to use base64 encoding for delta case. 
diff --git a/_examples/compression_playground/static/app.css b/_examples/compression_playground/static/app.css new file mode 100644 index 00000000..d3a83b95 --- /dev/null +++ b/_examples/compression_playground/static/app.css @@ -0,0 +1,48 @@ +html, body { + height: 100%; + margin: 0; + display: flex; + justify-content: center; + align-items: center; + font-family: 'Arial', sans-serif; + background-color: #f0f0f0; + color: #333; +} + +#app { + width: 100%; + max-width: 800px; /* Larger width for better display of big elements */ + padding: 40px; + box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2); + background-color: white; + border-radius: 10px; + margin: 20px; +} + +#app li { + margin-bottom: 5px; +} + +#app h2 { + font-size: 36px; /* Larger heading */ + color: #444; + margin-bottom: 30px; +} + +#app p { + font-size: 28px; /* Larger font size for the scores */ + line-height: 1.6; + margin: 20px 0; + color: #555; +} + +/* Styling for team names to make them stand out */ +#app p span.team-name { + font-weight: bold; +} + +/* Making the scores pop out even more */ +#app p span.team-score { + color: #d9534f; /* A vibrant color for the scores */ + font-weight: bold; +} \ No newline at end of file diff --git a/_examples/compression_playground/static/app.proto b/_examples/compression_playground/static/app.proto new file mode 100644 index 00000000..44a697e6 --- /dev/null +++ b/_examples/compression_playground/static/app.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package centrifugal.centrifuge.examples.compression_playground; + +option go_package = "./;apppb"; + +enum EventType { + UNKNOWN = 0; // Default value, should not be used + GOAL = 1; + YELLOW_CARD = 2; + RED_CARD = 3; + SUBSTITUTE = 4; +} + +message Event { + EventType type = 1; + int32 minute = 2; +} + +message Player { + string name = 1; + repeated Event events = 2; +} + +message Team { + string name = 1; + int32 score = 2; + repeated Player players = 3; +} + +message Match { + int32 id = 1; + Team home_team = 2; + 
Team away_team = 3; +} diff --git a/_examples/compression_playground/templates/index.html b/_examples/compression_playground/templates/index.html new file mode 100644 index 00000000..46fae638 --- /dev/null +++ b/_examples/compression_playground/templates/index.html @@ -0,0 +1,53 @@ + + + + + + + + + + + diff --git a/_examples/compression_playground/templates/json.html b/_examples/compression_playground/templates/json.html new file mode 100644 index 00000000..c5f40163 --- /dev/null +++ b/_examples/compression_playground/templates/json.html @@ -0,0 +1,93 @@ + + + + + + + + + + + + Home +
+ + diff --git a/_examples/compression_playground/templates/protobuf.html b/_examples/compression_playground/templates/protobuf.html new file mode 100644 index 00000000..e93219e7 --- /dev/null +++ b/_examples/compression_playground/templates/protobuf.html @@ -0,0 +1,101 @@ + + + + + + + + + + + + Home +
+ + diff --git a/_examples/go.mod b/_examples/go.mod index 22f2e201..91aef28a 100644 --- a/_examples/go.mod +++ b/_examples/go.mod @@ -1,6 +1,6 @@ module github.com/centrifugal/centrifuge/_examples -go 1.20 +go 1.21 replace github.com/centrifugal/centrifuge => ../ From 265d73c0d985beff3cfdca6dcfe679891bdba2ba Mon Sep 17 00:00:00 2001 From: FZambia Date: Tue, 9 Apr 2024 12:42:52 +0300 Subject: [PATCH 17/61] experiments --- _examples/compression_playground/main.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/_examples/compression_playground/main.go b/_examples/compression_playground/main.go index 89f8752e..51980b2a 100644 --- a/_examples/compression_playground/main.go +++ b/_examples/compression_playground/main.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/proto" ) -func simulateMatch(client *centrifuge.Client, num int32, node *centrifuge.Node, useProtobufPayload bool) { +func simulateMatch(ctx context.Context, num int32, node *centrifuge.Node, useProtobufPayload bool) { // Predefined lists of player names for each team. 
playerNamesTeamA := []string{"John Doe", "Jane Smith", "Alex Johnson", "Chris Lee", "Pat Kim", "Sam Morgan", "Jamie Brown", "Casey Davis", "Morgan Garcia", "Taylor White", "Jordan Martinez"} playerNamesTeamB := []string{"Robin Wilson", "Drew Taylor", "Jessie Bailey", "Casey Flores", "Jordan Walker", "Charlie Green", "Alex Adams", "Morgan Thompson", "Taylor Clark", "Jordan Hernandez", "Jamie Lewis"} @@ -43,7 +43,7 @@ func simulateMatch(client *centrifuge.Client, num int32, node *centrifuge.Node, for i := 0; i < totalEvents; i++ { // Sleep between events select { - case <-client.Context().Done(): + case <-ctx.Done(): return case <-time.After(time.Duration(eventInterval*1000) * time.Millisecond): } @@ -141,6 +141,11 @@ func main() { log.Println(entry.Message, entry.Fields) }, AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, + GetChannelCacheOptions: func(channel string) (centrifuge.ChannelCacheOptions, bool) { + return centrifuge.ChannelCacheOptions{ + Delay: 400 * time.Millisecond, + }, true + }, }) if err != nil { log.Fatal(err) @@ -177,7 +182,7 @@ func main() { go func() { log.Printf("using protobuf payload: %v", useProtobufPayload) - simulateMatch(client, 0, node, useProtobufPayload) + simulateMatch(client.Context(), 0, node, useProtobufPayload) }() //client.OnCacheEmpty(func(event centrifuge.CacheEmptyEvent) centrifuge.CacheEmptyReply { @@ -236,6 +241,12 @@ func main() { log.Fatal(err) } + go func() { + for { + simulateMatch(context.Background(), 0, node, false) + } + }() + // Now configure HTTP routes. 
http.Handle("/connection/websocket/no_compression", auth(centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{}))) From bd448373aee1a550d4b49089b93d85560a78b19b Mon Sep 17 00:00:00 2001 From: FZambia Date: Tue, 9 Apr 2024 12:42:58 +0300 Subject: [PATCH 18/61] experiments --- channel_cache.go | 101 +++++++++++++++++++++++++++++++++++++ client.go | 16 +++--- client_experimental.go | 8 +-- config.go | 7 +++ hub.go | 22 ++++---- node.go | 112 ++++++++++++++++++++++++++++++++++++++--- 6 files changed, 235 insertions(+), 31 deletions(-) create mode 100644 channel_cache.go diff --git a/channel_cache.go b/channel_cache.go new file mode 100644 index 00000000..d140a20e --- /dev/null +++ b/channel_cache.go @@ -0,0 +1,101 @@ +package centrifuge + +import ( + "time" +) + +// channelCache is responsible for keeping last publication and stream position in channel. +// It should periodically sync its state with a Broker if there were no new publications for a long time. +// It should check continuity in channel stream, if it's broken it should take appropriate actions. +// It should also handle delta flag correctly. +// When on it must be used by clients to check proper stream position in channel. +type channelCache struct { + channel string + node *Node + options ChannelCacheOptions + closeCh chan struct{} + pubCh chan latestPub + + // prevPublication is a previous publication in channel. + prevPublication *Publication + // prevStreamPosition is a stream position of previous publication. + prevStreamPosition StreamPosition + + latestPub latestPub +} + +func newChannelCache( + channel string, + node *Node, + options ChannelCacheOptions, +) *channelCache { + c := &channelCache{ + channel: channel, + node: node, + options: options, + closeCh: make(chan struct{}), + pubCh: make(chan latestPub), + } + return c +} + +type latestPub struct { + pub *Publication + sp StreamPosition + // TODO: probably it's unnecessary if we load and cache prev publication anyway? 
Publisher can publish + // without delta flag? + delta bool +} + +func (c *channelCache) initState(latestPublication *Publication, currentStreamPosition StreamPosition) { + c.prevStreamPosition = currentStreamPosition + c.prevPublication = latestPublication + go c.run() +} + +func (c *channelCache) handlePublication(pub *Publication, sp StreamPosition, delta bool, _ *Publication) { + select { + case c.pubCh <- latestPub{pub: pub, sp: sp, delta: delta}: + case <-c.closeCh: + } +} + +func (c *channelCache) broadcast() { + _ = c.node.handlePublication(c.channel, c.latestPub.pub, c.latestPub.sp, c.latestPub.delta, c.prevPublication, true) + c.prevPublication = c.latestPub.pub + c.prevStreamPosition = c.latestPub.sp +} + +func (c *channelCache) setLatestPub(cp latestPub) { + c.latestPub = cp +} + +func (c *channelCache) run() { + t := time.NewTimer(c.options.Delay) + t.Stop() + defer t.Stop() + scheduled := false + for { + select { + case <-c.closeCh: + return + case cp := <-c.pubCh: + c.setLatestPub(cp) + if c.options.Delay == 0 { + c.broadcast() + continue + } + if !scheduled { + t.Reset(c.options.Delay) + scheduled = true + } + case <-t.C: + c.broadcast() + scheduled = false + } + } +} + +func (c *channelCache) close() { + close(c.closeCh) +} diff --git a/client.go b/client.go index 2571b341..8b62d9e4 100644 --- a/client.go +++ b/client.go @@ -535,11 +535,11 @@ func (c *Client) checkPong() { func (c *Client) addPingUpdate(isFirst bool) { delay := c.pingInterval if isFirst { - // Send first ping in random interval between 0 and PingInterval to + // Send first ping in random interval between PingInterval/2 and PingInterval to // spread ping-pongs in time (useful when many connections reconnect // almost immediately). 
pingNanoseconds := c.pingInterval.Nanoseconds() - delay = time.Duration(randSource.Int63n(pingNanoseconds)) * time.Nanosecond + delay = time.Duration(pingNanoseconds/2) + time.Duration(randSource.Int63n(pingNanoseconds/2))*time.Nanosecond } c.nextPing = time.Now().Add(delay).UnixNano() c.scheduleNextTimer() @@ -3089,7 +3089,7 @@ func (c *Client) handleAsyncUnsubscribe(ch string, unsub Unsubscribe) { } } -func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition) error { +func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition, bypassOffset bool) error { c.mu.Lock() channelContext, ok := c.channels[ch] if !ok || !channelHasFlag(channelContext.flags, flagSubscribed) { @@ -3119,7 +3119,7 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica c.mu.Unlock() return nil } - if pubOffset != nextExpectedOffset { + if !bypassOffset && pubOffset != nextExpectedOffset { if c.node.logger.enabled(LogLevelDebug) { c.node.logger.log(newLogEntry(LogLevelDebug, "client insufficient state", map[string]any{"channel": ch, "user": c.user, "client": c.uid, "offset": pubOffset, "expectedOffset": nextExpectedOffset})) } @@ -3149,11 +3149,11 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica return c.transportEnqueue(data.data, ch, protocol.FrameTypePushPublication) } -func (c *Client) writePublicationNoDelta(ch string, pub *protocol.Publication, data []byte, sp StreamPosition) error { - return c.writePublication(ch, pub, dataValue{data: data, deltaData: data}, sp) +func (c *Client) writePublicationNoDelta(ch string, pub *protocol.Publication, data []byte, sp StreamPosition, bypassOffset bool) error { + return c.writePublication(ch, pub, dataValue{data: data, deltaData: data}, sp, bypassOffset) } -func (c *Client) writePublication(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition) 
error { +func (c *Client) writePublication(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition, bypassOffset bool) error { if c.node.LogEnabled(LogLevelTrace) { c.traceOutPush(&protocol.Push{Channel: ch, Pub: pub}) } @@ -3164,7 +3164,7 @@ func (c *Client) writePublication(ch string, pub *protocol.Publication, data dat return c.transportEnqueue(data.data, ch, protocol.FrameTypePushPublication) } c.pubSubSync.SyncPublication(ch, pub, func() { - _ = c.writePublicationUpdatePosition(ch, pub, data, sp) + _ = c.writePublicationUpdatePosition(ch, pub, data, sp, bypassOffset) }) return nil } diff --git a/client_experimental.go b/client_experimental.go index bfcb355c..0760ecc6 100644 --- a/client_experimental.go +++ b/client_experimental.go @@ -29,7 +29,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c) return err } - return c.writePublicationNoDelta(channel, pub, jsonPush, sp) + return c.writePublicationNoDelta(channel, pub, jsonPush, sp, false) } else { push := &protocol.Push{Channel: channel, Pub: pub} var err error @@ -38,7 +38,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c) return err } - return c.writePublicationNoDelta(channel, pub, jsonReply, sp) + return c.writePublicationNoDelta(channel, pub, jsonReply, sp, false) } } else if protoType == protocol.TypeProtobuf { if c.transport.Unidirectional() { @@ -48,7 +48,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S if err != nil { return err } - return c.writePublicationNoDelta(channel, pub, protobufPush, sp) + return c.writePublicationNoDelta(channel, pub, protobufPush, sp, false) } else { push := &protocol.Push{Channel: channel, Pub: pub} var err error @@ -56,7 +56,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S if 
err != nil { return err } - return c.writePublicationNoDelta(channel, pub, protobufReply, sp) + return c.writePublicationNoDelta(channel, pub, protobufReply, sp, false) } } diff --git a/config.go b/config.go index 812099a6..4b08c25d 100644 --- a/config.go +++ b/config.go @@ -112,6 +112,13 @@ type Config struct { // AllowedDeltaTypes is a whitelist of DeltaType subscribers can use. At this point Centrifuge // only supports DeltaTypeFossil. If not set clients won't be able to negotiate delta encoding. AllowedDeltaTypes []DeltaType + + GetChannelCacheOptions func(channel string) (ChannelCacheOptions, bool) +} + +type ChannelCacheOptions struct { + // Delay broadcasting. + Delay time.Duration } const ( diff --git a/hub.go b/hub.go index 9196278a..4d620291 100644 --- a/hub.go +++ b/hub.go @@ -134,12 +134,12 @@ func (h *Hub) removeSub(ch string, c *Client) (bool, error) { // uses a Broker to deliver publications to all Nodes in a cluster and maintains publication history // in a channel with incremental offset. By calling BroadcastPublication messages will only be sent // to the current node subscribers without any defined offset semantics. 
-func (h *Hub) BroadcastPublication(ch string, pub *Publication, sp StreamPosition) error { - return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, pubToProto(pub), sp) +func (h *Hub) BroadcastPublication(ch string, pub *Publication, sp StreamPosition, bypassOffset bool) error { + return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, pubToProto(pub), sp, bypassOffset) } -func (h *Hub) broadcastPublicationDelta(ch string, pub *Publication, prevPub *Publication, sp StreamPosition) error { - return h.subShards[index(ch, numHubShards)].broadcastPublicationDelta(ch, pub, prevPub, sp) +func (h *Hub) broadcastPublicationDelta(ch string, pub *Publication, prevPub *Publication, sp StreamPosition, bypassOffset bool) error { + return h.subShards[index(ch, numHubShards)].broadcastPublicationDelta(ch, pub, prevPub, sp, bypassOffset) } // broadcastJoin sends message to all clients subscribed on channel. @@ -569,7 +569,7 @@ type dataValue struct { } // broadcastPublicationDelta sends message to all clients subscribed on channel trying to use deltas. -func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, prevPub *Publication, sp StreamPosition) error { +func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, prevPub *Publication, sp StreamPosition, bypassOffset bool) error { fullPub := pubToProto(pub) dataByKey := make(map[broadcastKey]dataValue) @@ -726,7 +726,7 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client) continue } - _ = sub.client.writePublication(channel, fullPub, value, sp) + _ = sub.client.writePublication(channel, fullPub, value, sp, bypassOffset) } if jsonEncodeErr != nil && h.logger.enabled(LogLevelWarn) { // Log that we had clients with inappropriate protocol, and point to the first such client. 
@@ -741,7 +741,7 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p } // broadcastPublication sends message to all clients subscribed on channel. -func (h *subShard) broadcastPublication(channel string, pub *protocol.Publication, sp StreamPosition) error { +func (h *subShard) broadcastPublication(channel string, pub *protocol.Publication, sp StreamPosition, bypassOffset bool) error { h.mu.RLock() defer h.mu.RUnlock() @@ -778,7 +778,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio continue } } - _ = sub.client.writePublicationNoDelta(channel, pub, jsonPush, sp) + _ = sub.client.writePublicationNoDelta(channel, pub, jsonPush, sp, bypassOffset) } else { if jsonReply == nil { push := &protocol.Push{Channel: channel, Pub: pub} @@ -790,7 +790,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio continue } } - _ = sub.client.writePublicationNoDelta(channel, pub, jsonReply, sp) + _ = sub.client.writePublicationNoDelta(channel, pub, jsonReply, sp, bypassOffset) } } else if protoType == protocol.TypeProtobuf { if sub.client.transport.Unidirectional() { @@ -802,7 +802,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio return err } } - _ = sub.client.writePublicationNoDelta(channel, pub, protobufPush, sp) + _ = sub.client.writePublicationNoDelta(channel, pub, protobufPush, sp, bypassOffset) } else { if protobufReply == nil { push := &protocol.Push{Channel: channel, Pub: pub} @@ -812,7 +812,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio return err } } - _ = sub.client.writePublicationNoDelta(channel, pub, protobufReply, sp) + _ = sub.client.writePublicationNoDelta(channel, pub, protobufReply, sp, bypassOffset) } } } diff --git a/node.go b/node.go index b5615d37..94dd3503 100644 --- a/node.go +++ b/node.go @@ -83,6 +83,8 @@ type Node struct { nodeInfoSendHandler NodeInfoSendHandler emulationSurveyHandler 
*emulationSurveyHandler + + caches map[string]*channelCache } const ( @@ -162,6 +164,7 @@ func New(c Config) (*Node, error) { subDissolver: dissolve.New(numSubDissolverWorkers), nowTimeGetter: nowtime.Get, surveyRegistry: make(map[uint64]chan survey), + caches: make(map[string]*channelCache), } n.emulationSurveyHandler = newEmulationSurveyHandler(n) @@ -683,7 +686,7 @@ func (n *Node) handleControl(data []byte) error { // handlePublication handles messages published into channel and // coming from Broker. The goal of method is to deliver this message // to all clients on this node currently subscribed to channel. -func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { +func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication, bypassOffset bool) error { n.metrics.incMessagesReceived("publication") numSubscribers := n.hub.NumSubscribers(ch) hasCurrentSubscribers := numSubscribers > 0 @@ -691,13 +694,26 @@ func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, return nil } if delta { - err := n.hub.broadcastPublicationDelta(ch, pub, prevPub, sp) + err := n.hub.broadcastPublicationDelta(ch, pub, prevPub, sp, bypassOffset) if err != nil { n.Log(newLogEntry(LogLevelError, "error broadcast delta", map[string]any{"error": err.Error()})) } return err } - return n.hub.BroadcastPublication(ch, pub, sp) + return n.hub.BroadcastPublication(ch, pub, sp, bypassOffset) +} + +func (n *Node) handlePublicationCached(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { + mu := n.subLock(ch) + mu.Lock() + cache, ok := n.caches[ch] + if ok { + cache.handlePublication(pub, sp, delta, prevPub) + mu.Unlock() + return nil + } + mu.Unlock() + return n.handlePublication(ch, pub, sp, delta, prevPub, false) } // handleJoin handles join messages - i.e. 
broadcasts it to @@ -988,11 +1004,69 @@ func (n *Node) addSubscription(ch string, sub subInfo) error { return err } if first { + // TODO: there is a gap between subscribe and cache initialization. + // Need to synchronize. + if n.config.GetChannelCacheOptions != nil { + cacheOpts, ok := n.config.GetChannelCacheOptions(ch) + if ok { + chCache := newChannelCache(ch, n, cacheOpts) + n.caches[ch] = chCache + } + } + err := n.broker.Subscribe(ch) if err != nil { _, _ = n.hub.removeSub(ch, sub.client) + if n.config.GetChannelCacheOptions != nil { + delete(n.caches, ch) + } return err } + + // TODO: there is a gap between subscribe and cache initialization. + // Need to synchronize. + if n.config.GetChannelCacheOptions != nil { + cache, ok := n.caches[ch] + if ok { + hr, err := n.History(ch, WithHistoryFilter(HistoryFilter{ + Limit: 1, + Reverse: true, + })) + if err != nil { + _, _ = n.hub.removeSub(ch, sub.client) + + // TODO: eliminate code duplication. + submittedAt := time.Now() + _ = n.subDissolver.Submit(func() error { + timeSpent := time.Since(submittedAt) + if timeSpent < time.Second { + time.Sleep(time.Second - timeSpent) + } + mu := n.subLock(ch) + mu.Lock() + defer mu.Unlock() + empty := n.hub.NumSubscribers(ch) == 0 + if empty { + err := n.broker.Unsubscribe(ch) + if err != nil { + // Cool down a bit since broker is not ready to process unsubscription. 
+ time.Sleep(500 * time.Millisecond) + } + return err + } + return nil + }) + + return err + } + currentStreamPosition := hr.StreamPosition + var latestPublication *Publication + if len(hr.Publications) > 0 { + latestPublication = hr.Publications[0] + } + cache.initState(latestPublication, currentStreamPosition) + } + } } return nil } @@ -1009,6 +1083,13 @@ func (n *Node) removeSubscription(ch string, c *Client) error { return err } if empty { + cache, ok := n.caches[ch] + if ok { + println("close cache") + cache.close() + delete(n.caches, ch) + } + submittedAt := time.Now() _ = n.subDissolver.Submit(func() error { timeSpent := time.Since(submittedAt) @@ -1347,10 +1428,22 @@ func (n *Node) recoverHistory(ch string, since StreamPosition, historyMetaTTL ti // recoverCache recovers last publication in channel. func (n *Node) recoverCache(ch string, historyMetaTTL time.Duration) (HistoryResult, error) { n.metrics.incActionCount("history_recover") - return n.History(ch, WithHistoryFilter(HistoryFilter{ - Limit: 1, - Reverse: true, - }), WithHistoryMetaTTL(historyMetaTTL)) + if n.caches[ch] == nil { + return n.History(ch, WithHistoryFilter(HistoryFilter{ + Limit: 1, + Reverse: true, + }), WithHistoryMetaTTL(historyMetaTTL)) + } + if n.caches[ch].prevPublication != nil { + return HistoryResult{ + StreamPosition: n.caches[ch].prevStreamPosition, + Publications: []*Publication{n.caches[ch].prevPublication}, + }, nil + } + return HistoryResult{ + StreamPosition: n.caches[ch].prevStreamPosition, + Publications: nil, + }, nil } // streamTop returns current stream top StreamPosition for a channel. 
@@ -1537,7 +1630,10 @@ func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp S if pub == nil { panic("nil Publication received, this must never happen") } - return h.node.handlePublication(ch, pub, sp, delta, prevPub) + if h.node.config.GetChannelCacheOptions != nil { + return h.node.handlePublicationCached(ch, pub, sp, delta, prevPub) + } + return h.node.handlePublication(ch, pub, sp, delta, prevPub, false) } // HandleJoin coming from Broker. From b668b5bac209b110190a60f07534562f5c9d8a05 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 15 Apr 2024 21:33:14 +0300 Subject: [PATCH 19/61] make redis work --- _examples/compression_playground/main.go | 25 +- .../templates/json.html | 4 +- .../templates/protobuf.html | 4 +- broker_redis.go | 61 ++- broker_redis_test.go | 26 +- channel_cache.go | 473 +++++++++++++++--- client_test.go | 2 +- config.go | 5 - hub_test.go | 10 +- .../redis_lua/broker_history_add_stream.lua | 10 + node.go | 15 +- 11 files changed, 524 insertions(+), 111 deletions(-) diff --git a/_examples/compression_playground/main.go b/_examples/compression_playground/main.go index 51980b2a..cebae518 100644 --- a/_examples/compression_playground/main.go +++ b/_examples/compression_playground/main.go @@ -143,7 +143,9 @@ func main() { AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, GetChannelCacheOptions: func(channel string) (centrifuge.ChannelCacheOptions, bool) { return centrifuge.ChannelCacheOptions{ - Delay: 400 * time.Millisecond, + Delay: 200 * time.Millisecond, + SyncInterval: 10 * time.Millisecond, + KeepLatestPublication: true, }, true }, }) @@ -151,6 +153,27 @@ func main() { log.Fatal(err) } + redisShardConfigs := []centrifuge.RedisShardConfig{ + {Address: "localhost:6379"}, + } + var redisShards []*centrifuge.RedisShard + for _, redisConf := range redisShardConfigs { + redisShard, err := centrifuge.NewRedisShard(node, redisConf) + if err != nil { + log.Fatal(err) + } + redisShards = 
append(redisShards, redisShard) + } + + broker, err := centrifuge.NewRedisBroker(node, centrifuge.RedisBrokerConfig{ + // And configure a couple of shards to use. + Shards: redisShards, + }) + if err != nil { + log.Fatal(err) + } + node.SetBroker(broker) + node.OnConnecting(func(ctx context.Context, event centrifuge.ConnectEvent) (centrifuge.ConnectReply, error) { cred, _ := centrifuge.GetCredentials(ctx) reply := centrifuge.ConnectReply{} diff --git a/_examples/compression_playground/templates/json.html b/_examples/compression_playground/templates/json.html index c5f40163..0e4e40da 100644 --- a/_examples/compression_playground/templates/json.html +++ b/_examples/compression_playground/templates/json.html @@ -49,7 +49,9 @@ app.appendChild(scoreDisplay); } - const centrifuge = new Centrifuge(wsEndpoint, {}); + const centrifuge = new Centrifuge(wsEndpoint, { + debug: true, + }); let subOptions = { since: {}, diff --git a/_examples/compression_playground/templates/protobuf.html b/_examples/compression_playground/templates/protobuf.html index e93219e7..f0e88c6c 100644 --- a/_examples/compression_playground/templates/protobuf.html +++ b/_examples/compression_playground/templates/protobuf.html @@ -57,7 +57,9 @@ app.appendChild(scoreDisplay); } - const centrifuge = new Centrifuge(wsEndpoint, {}); + const centrifuge = new Centrifuge(wsEndpoint, { + debug: true, + }); let subOptions = { since: {}, diff --git a/broker_redis.go b/broker_redis.go index 7ddd82f9..93761075 100644 --- a/broker_redis.go +++ b/broker_redis.go @@ -1019,14 +1019,16 @@ func (b *RedisBroker) handleRedisClientMessage(eventHandler BrokerEventHandler, // it to unmarshalled Publication. 
pub.Offset = sp.Offset } - var prevPub protocol.Publication if delta && len(prevPayload) > 0 { - err = pub.UnmarshalVT(pushData) + var prevPub protocol.Publication + err = prevPub.UnmarshalVT(prevPayload) if err != nil { return err } + _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, delta, pubFromProto(&prevPub)) + } else { + _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, false, nil) } - _ = eventHandler.HandlePublication(channel, pubFromProto(&pub), sp, false, pubFromProto(&prevPub)) } else if pushType == joinPushType { var info protocol.ClientInfo err := info.UnmarshalVT(pushData) @@ -1300,45 +1302,60 @@ func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool, []byt if !bytes.HasPrefix(data, metaSep) { return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, true } - nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep) - if nextMetaSepPos <= 0 { + + content := data[len(metaSep):] + if len(content) < 0 { return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false } - content := data[len(metaSep) : len(metaSep)+nextMetaSepPos] - contentType := content[0] - rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):] + contentType := content[0] switch contentType { case 'j': + // __j__payload. + nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep) + if nextMetaSepPos <= 0 { + return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false + } + rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):] return rest, joinPushType, StreamPosition{}, false, nil, true case 'l': + // __l__payload. 
+ nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep) + if nextMetaSepPos <= 0 { + return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false + } + rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):] return rest, leavePushType, StreamPosition{}, false, nil, true - } + case 'p': + // p1:offset:epoch__payload + nextMetaSepPos := bytes.Index(data[len(metaSep):], metaSep) + if nextMetaSepPos <= 0 { + return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false + } + header := data[len(metaSep) : len(metaSep)+nextMetaSepPos] + stringHeader := convert.BytesToString(header) - stringContent := convert.BytesToString(content) + rest := data[len(metaSep)+nextMetaSepPos+len(metaSep):] - if contentType == 'p' { - // p1:offset:epoch__payload - stringContent = stringContent[3:] // offset:epoch - epochDelimiterPos := strings.Index(stringContent, contentSep) + stringHeader = stringHeader[3:] // offset:epoch + epochDelimiterPos := strings.Index(stringHeader, contentSep) if epochDelimiterPos <= 0 { return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false } var err error - offset, err = strconv.ParseUint(stringContent[:epochDelimiterPos], 10, 64) - epoch = stringContent[epochDelimiterPos+1:] + offset, err = strconv.ParseUint(stringHeader[:epochDelimiterPos], 10, 64) + epoch = stringHeader[epochDelimiterPos+1:] return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, err == nil - } else if contentType == 'd' { + case 'd': // d1:offset:epoch:prev_payload_length:prev_payload:payload_length:payload + stringContent := convert.BytesToString(content) parsedDelta, err := parseDeltaPush(stringContent) return convert.StringToBytes(parsedDelta.Payload), pubPushType, StreamPosition{Epoch: parsedDelta.Epoch, Offset: parsedDelta.Offset}, true, convert.StringToBytes(parsedDelta.PrevPayload), err == nil + default: + // Unknown content type. 
+ return nil, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false } - - // old format with offset only: __offset__ - var err error - offset, err = strconv.ParseUint(stringContent, 10, 64) - return rest, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, err == nil } type deltaPublicationPush struct { diff --git a/broker_redis_test.go b/broker_redis_test.go index eec4ff74..d160166f 100644 --- a/broker_redis_test.go +++ b/broker_redis_test.go @@ -766,13 +766,13 @@ func TestRedisExtractPushData(t *testing.T) { require.Equal(t, "xyz.123", sp.Epoch) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) - data = []byte(`__16901__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, _, _, ok = extractPushData(data) - require.True(t, ok) - require.Equal(t, pubPushType, pushType) - require.Equal(t, uint64(16901), sp.Offset) - require.Equal(t, "", sp.Epoch) - require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) + //data = []byte(`__16901__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) + //pushData, pushType, sp, _, _, ok = extractPushData(data) + //require.True(t, ok) + //require.Equal(t, pubPushType, pushType) + //require.Equal(t, uint64(16901), sp.Offset) + //require.Equal(t, "", sp.Epoch) + //require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": 
\"Alexander\"}\"\x00`) pushData, pushType, sp, _, _, ok = extractPushData(data) @@ -781,12 +781,12 @@ func TestRedisExtractPushData(t *testing.T) { require.Equal(t, uint64(0), sp.Offset) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) - data = []byte(`__4294967337__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - pushData, pushType, sp, _, _, ok = extractPushData(data) - require.True(t, ok) - require.Equal(t, pubPushType, pushType) - require.Equal(t, uint64(4294967337), sp.Offset) - require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) + //data = []byte(`__4294967337__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) + //pushData, pushType, sp, _, _, ok = extractPushData(data) + //require.True(t, ok) + //require.Equal(t, pubPushType, pushType) + //require.Equal(t, uint64(4294967337), sp.Offset) + //require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) data = []byte(`__j__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) pushData, pushType, sp, _, _, ok = extractPushData(data) diff --git a/channel_cache.go b/channel_cache.go index d140a20e..7e5d70d9 100644 --- a/channel_cache.go +++ b/channel_cache.go @@ -1,97 +1,277 @@ package centrifuge import ( + "github.com/centrifugal/centrifuge/internal/timers" + "math" + "sync" + "sync/atomic" "time" ) -// channelCache is responsible for keeping last publication and stream position in channel. 
-// It should periodically sync its state with a Broker if there were no new publications for a long time. -// It should check continuity in channel stream, if it's broken it should take appropriate actions. -// It should also handle delta flag correctly. -// When on it must be used by clients to check proper stream position in channel. +type ChannelCacheOptions struct { + // Delay broadcasting. In this case intermediate publications may be skipped. May be used to + // reduce number of messages sent to clients. If zero, then all publications will be sent to clients. + Delay time.Duration + // KeepLatestPublication enables keeping latest publication in channel cache. This is required + // for using deltas when delay > 0. Also, this enables fast recovery after reconnect. + KeepLatestPublication bool + // SyncInterval is a time interval to check if we need to sync state with Broker. + // By default, no sync will be done. In this case each individual connection will + // sync separately. + SyncInterval time.Duration +} + +// channelCache is an optional intermediary layer between broker and client connections. +// It may periodically sync its state with a Broker if there were no new publications for a long time. +// If it finds that a continuity in a channel stream is broken it marks channel subscribers with +// insufficient state flag. +// It may be used by clients to check proper stream position in channel thus drastically reduce +// load on Broker. +// It may keep last publication – and with Delay option only send latest publication to clients skipping +// intermediate publications. +// It may also handle delta updates and send deltas (between several publications if Delay is used). 
type channelCache struct { - channel string - node *Node - options ChannelCacheOptions + initialized atomic.Int64 + channel string + node node + options ChannelCacheOptions + + mu sync.Mutex + messages *cacheQueue + closeCh chan struct{} - pubCh chan latestPub - // prevPublication is a previous publication in channel. - prevPublication *Publication - // prevStreamPosition is a stream position of previous publication. - prevStreamPosition StreamPosition + // latestPublication is an initial publication in channel or publication last sent. + latestPublication *Publication + // currentStreamPosition is an initial stream position or stream position lastly sent. + currentStreamPosition StreamPosition + + latestQueuedStreamPosition StreamPosition + + positionCheckTime int64 + nowTimeGetter func() time.Time + metaTTLSeconds time.Duration // TODO: not used yet +} - latestPub latestPub +type node interface { + handlePublication( + channel string, pub *Publication, sp StreamPosition, delta bool, + prevPublication *Publication, bypassOffset bool, + ) error + History(ch string, opts ...HistoryOption) (HistoryResult, error) } func newChannelCache( channel string, - node *Node, + node node, options ChannelCacheOptions, ) *channelCache { c := &channelCache{ - channel: channel, - node: node, - options: options, - closeCh: make(chan struct{}), - pubCh: make(chan latestPub), + channel: channel, + node: node, + options: options, + closeCh: make(chan struct{}), + messages: newCacheQueue(2), + nowTimeGetter: func() time.Time { + return time.Now() + }, + positionCheckTime: time.Now().Unix(), } return c } -type latestPub struct { - pub *Publication - sp StreamPosition - // TODO: probably it's unnecessary if we load and cache prev publication anyway? Publisher can publish - // without delta flag? 
- delta bool +type queuedPub struct { + pub *Publication + sp StreamPosition + delta bool + prevPub *Publication + isInsufficientState bool } func (c *channelCache) initState(latestPublication *Publication, currentStreamPosition StreamPosition) { - c.prevStreamPosition = currentStreamPosition - c.prevPublication = latestPublication - go c.run() + if c.options.KeepLatestPublication { + c.latestPublication = latestPublication + } + c.currentStreamPosition = currentStreamPosition + c.latestQueuedStreamPosition = currentStreamPosition + go c.runChecks() + go c.writer() + c.initialized.Store(1) } -func (c *channelCache) handlePublication(pub *Publication, sp StreamPosition, delta bool, _ *Publication) { - select { - case c.pubCh <- latestPub{pub: pub, sp: sp, delta: delta}: - case <-c.closeCh: +func (c *channelCache) handlePublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { + if c.initialized.Load() == 0 { + // Skip publications while cache is not initialized. + return } + bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} + c.mu.Lock() + defer c.mu.Unlock() + c.latestQueuedStreamPosition = sp + c.positionCheckTime = c.nowTimeGetter().Unix() + c.messages.Add(queuedItem{Publication: bp}) } -func (c *channelCache) broadcast() { - _ = c.node.handlePublication(c.channel, c.latestPub.pub, c.latestPub.sp, c.latestPub.delta, c.prevPublication, true) - c.prevPublication = c.latestPub.pub - c.prevStreamPosition = c.latestPub.sp +func (c *channelCache) handleInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { + bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} + c.mu.Lock() + defer c.mu.Unlock() + c.latestQueuedStreamPosition = currentStreamTop + c.positionCheckTime = c.nowTimeGetter().Unix() + // TODO: possibly c.messages.dropQueued() ? 
+ c.messages.Add(queuedItem{Publication: bp}) } -func (c *channelCache) setLatestPub(cp latestPub) { - c.latestPub = cp +func (c *channelCache) broadcast(qp queuedPub) { + bypassOffset := c.options.Delay > 0 && !qp.isInsufficientState + pubToBroadcast := qp.pub + if qp.isInsufficientState { + pubToBroadcast = &Publication{ + Offset: math.MaxUint64, + } + } + prevPub := qp.prevPub + if c.options.KeepLatestPublication && c.options.Delay > 0 { + prevPub = c.latestPublication + } + delta := qp.delta + if c.options.Delay > 0 && !c.options.KeepLatestPublication { + delta = false + } + _ = c.node.handlePublication( + c.channel, pubToBroadcast, qp.sp, delta, prevPub, bypassOffset) + c.mu.Lock() + defer c.mu.Unlock() + if qp.sp.Offset > c.currentStreamPosition.Offset { + c.currentStreamPosition = qp.sp + if c.options.KeepLatestPublication { + c.latestPublication = qp.pub + } + } } -func (c *channelCache) run() { - t := time.NewTimer(c.options.Delay) - t.Stop() - defer t.Stop() - scheduled := false +func (c *channelCache) writer() { for { + if ok := c.waitSendPub(c.options.Delay); !ok { + return + } + } +} + +func (c *channelCache) waitSendPub(delay time.Duration) bool { + // Wait for message from the queue. 
+ ok := c.messages.Wait() + if !ok { + return false + } + + if delay > 0 { + tm := timers.AcquireTimer(delay) select { + case <-tm.C: case <-c.closeCh: - return - case cp := <-c.pubCh: - c.setLatestPub(cp) - if c.options.Delay == 0 { - c.broadcast() - continue + timers.ReleaseTimer(tm) + return false + } + + timers.ReleaseTimer(tm) + } + + msg, ok := c.messages.Remove() + if !ok { + return !c.messages.Closed() + } + if delay == 0 || msg.Publication.isInsufficientState { + c.broadcast(msg.Publication) + return true + } + messageCount := c.messages.Len() + for messageCount > 0 { + messageCount-- + var ok bool + msg, ok = c.messages.Remove() + if !ok { + if c.messages.Closed() { + return false } - if !scheduled { - t.Reset(c.options.Delay) - scheduled = true + break + } + if msg.Publication.isInsufficientState { + break + } + } + c.broadcast(msg.Publication) + return true +} + +func (c *channelCache) checkPosition() (*Publication, StreamPosition, bool) { + nowUnix := c.nowTimeGetter().Unix() + needCheckPosition := nowUnix-c.positionCheckTime >= int64(c.options.SyncInterval.Seconds()) + + if !needCheckPosition { + return nil, StreamPosition{}, true + } + + var historyMetaTTL time.Duration + if c.metaTTLSeconds > 0 { + historyMetaTTL = c.metaTTLSeconds * time.Second + } + + hr, err := c.node.History(c.channel, WithHistoryFilter(HistoryFilter{ + Limit: 1, + Reverse: true, + }), WithHistoryMetaTTL(historyMetaTTL)) + + currentStreamPosition := hr.StreamPosition + var latestPublication *Publication + if len(hr.Publications) > 0 { + latestPublication = hr.Publications[0] + } + if err != nil { + // Check later. 
+ return nil, StreamPosition{}, true + } + + return latestPublication, currentStreamPosition, c.isValidPosition(currentStreamPosition, nowUnix) +} + +func (c *channelCache) isValidPosition(streamTop StreamPosition, nowUnix int64) bool { + c.mu.Lock() + defer c.mu.Unlock() + position := c.latestQueuedStreamPosition + isValidPosition := streamTop.Epoch == position.Epoch && position.Offset >= streamTop.Offset + if isValidPosition { + c.positionCheckTime = nowUnix + return true + } + return false +} + +func (c *channelCache) runChecks() { + var syncCh <-chan time.Time + if c.options.SyncInterval > 0 { + ticker := time.NewTicker(c.options.SyncInterval) + syncCh = ticker.C + defer ticker.Stop() + } + for { + select { + case <-c.closeCh: + return + case <-syncCh: + // Sync state with Broker. + _, _, validPosition := c.checkPosition() + if !validPosition { + // One retry. + var ( + latestPublication *Publication + streamTop StreamPosition + ) + latestPublication, streamTop, validPosition = c.checkPosition() + if !validPosition { + c.handleInsufficientState(streamTop, latestPublication) + } } - case <-t.C: - c.broadcast() - scheduled = false } } } @@ -99,3 +279,184 @@ func (c *channelCache) run() { func (c *channelCache) close() { close(c.closeCh) } + +type queuedItem struct { + Publication queuedPub +} + +// cacheQueue is an unbounded queue of queuedItem. +// The queue is goroutine safe. +// Inspired by http://blog.dubbelboer.com/2015/04/25/go-faster-queue.html (MIT) +type cacheQueue struct { + mu sync.RWMutex + cond *sync.Cond + nodes []queuedItem + head int + tail int + cnt int + size int + closed bool + initCap int +} + +// newCacheQueue returns a new queuedItem queue with initial capacity. 
+func newCacheQueue(initialCapacity int) *cacheQueue { + sq := &cacheQueue{ + initCap: initialCapacity, + nodes: make([]queuedItem, initialCapacity), + } + sq.cond = sync.NewCond(&sq.mu) + return sq +} + +// WriteMany mutex must be held when calling +func (q *cacheQueue) resize(n int) { + nodes := make([]queuedItem, n) + if q.head < q.tail { + copy(nodes, q.nodes[q.head:q.tail]) + } else { + copy(nodes, q.nodes[q.head:]) + copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.tail]) + } + + q.tail = q.cnt % n + q.head = 0 + q.nodes = nodes +} + +// Add an queuedItem to the back of the queue +// will return false if the queue is closed. +// In that case the queuedItem is dropped. +func (q *cacheQueue) Add(i queuedItem) bool { + q.mu.Lock() + if q.closed { + q.mu.Unlock() + return false + } + if q.cnt == len(q.nodes) { + // Also tested a growth rate of 1.5, see: http://stackoverflow.com/questions/2269063/buffer-growth-strategy + // In Go this resulted in a higher memory usage. + q.resize(q.cnt * 2) + } + q.nodes[q.tail] = i + q.tail = (q.tail + 1) % len(q.nodes) + if i.Publication.pub != nil { + q.size += len(i.Publication.pub.Data) + } + q.cnt++ + q.cond.Signal() + q.mu.Unlock() + return true +} + +// Close the queue and discard all entries in the queue +// all goroutines in wait() will return +func (q *cacheQueue) Close() { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.cnt = 0 + q.nodes = nil + q.size = 0 + q.cond.Broadcast() +} + +// CloseRemaining will close the queue and return all entries in the queue. +// All goroutines in wait() will return. 
+func (q *cacheQueue) CloseRemaining() []queuedItem { + q.mu.Lock() + defer q.mu.Unlock() + if q.closed { + return []queuedItem{} + } + rem := make([]queuedItem, 0, q.cnt) + for q.cnt > 0 { + i := q.nodes[q.head] + q.head = (q.head + 1) % len(q.nodes) + q.cnt-- + rem = append(rem, i) + } + q.closed = true + q.cnt = 0 + q.nodes = nil + q.size = 0 + q.cond.Broadcast() + return rem +} + +// Closed returns true if the queue has been closed +// The call cannot guarantee that the queue hasn't been +// closed while the function returns, so only "true" has a definite meaning. +func (q *cacheQueue) Closed() bool { + q.mu.RLock() + c := q.closed + q.mu.RUnlock() + return c +} + +// Wait for a message to be added. +// If there are items on the queue will return immediately. +// Will return false if the queue is closed. +// Otherwise, returns true. +func (q *cacheQueue) Wait() bool { + q.mu.Lock() + if q.closed { + q.mu.Unlock() + return false + } + if q.cnt != 0 { + q.mu.Unlock() + return true + } + q.cond.Wait() + q.mu.Unlock() + return true +} + +// Remove will remove an queuedItem from the queue. +// If false is returned, it either means 1) there were no items on the queue +// or 2) the queue is closed. +func (q *cacheQueue) Remove() (queuedItem, bool) { + q.mu.Lock() + if q.cnt == 0 { + q.mu.Unlock() + return queuedItem{}, false + } + i := q.nodes[q.head] + q.head = (q.head + 1) % len(q.nodes) + q.cnt-- + if i.Publication.pub != nil { + q.size -= len(i.Publication.pub.Data) + } + + if n := len(q.nodes) / 2; n >= q.initCap && q.cnt <= n { + q.resize(n) + } + + q.mu.Unlock() + return i, true +} + +// Cap returns the capacity (without allocations) +func (q *cacheQueue) Cap() int { + q.mu.RLock() + c := cap(q.nodes) + q.mu.RUnlock() + return c +} + +// Len returns the current length of the queue. +func (q *cacheQueue) Len() int { + q.mu.RLock() + l := q.cnt + q.mu.RUnlock() + return l +} + +// Size returns the current size of the queue. 
+func (q *cacheQueue) Size() int { + q.mu.RLock() + s := q.size + q.mu.RUnlock() + return s +} diff --git a/client_test.go b/client_test.go index c881cc7e..4395b501 100644 --- a/client_test.go +++ b/client_test.go @@ -678,7 +678,7 @@ func testUnexpectedOffsetEpochProtocolV2(t *testing.T, offset uint64, epoch stri err = node.handlePublication("test", &Publication{ Offset: offset, - }, StreamPosition{offset, epoch}, false, nil) + }, StreamPosition{offset, epoch}, false, nil, false) require.NoError(t, err) select { diff --git a/config.go b/config.go index 4b08c25d..964990b4 100644 --- a/config.go +++ b/config.go @@ -116,11 +116,6 @@ type Config struct { GetChannelCacheOptions func(channel string) (ChannelCacheOptions, bool) } -type ChannelCacheOptions struct { - // Delay broadcasting. - Delay time.Duration -} - const ( // nodeInfoPublishInterval is an interval how often node must publish // node control message. diff --git a/hub_test.go b/hub_test.go index 81119c57..71e66b38 100644 --- a/hub_test.go +++ b/hub_test.go @@ -470,7 +470,7 @@ func TestHubBroadcastPublication(t *testing.T) { err := n.hub.BroadcastPublication( "non_existing_channel", &Publication{Data: []byte(`{"data": "broadcast_data"}`)}, - StreamPosition{}, + StreamPosition{}, false, ) require.NoError(t, err) @@ -478,7 +478,7 @@ func TestHubBroadcastPublication(t *testing.T) { err = n.hub.BroadcastPublication( "test_channel", &Publication{Data: []byte(`{"data": "broadcast_data"}`)}, - StreamPosition{}, + StreamPosition{}, false, ) require.NoError(t, err) LOOP: @@ -744,7 +744,7 @@ func BenchmarkHub_Contention(b *testing.B) { wg.Add(1) go func() { defer wg.Done() - _ = n.hub.BroadcastPublication(channels[(i+numChannels/2)%numChannels], pub, streamPosition) + _ = n.hub.BroadcastPublication(channels[(i+numChannels/2)%numChannels], pub, streamPosition, false) }() _, _ = n.hub.addSub(channels[i%numChannels], subInfo{client: clients[i%numClients], deltaType: ""}) wg.Wait() @@ -806,7 +806,7 @@ func 
BenchmarkHub_MassiveBroadcast(b *testing.B) { } } }() - _ = n.hub.BroadcastPublication(channels[i%numChannels], pub, streamPosition) + _ = n.hub.BroadcastPublication(channels[i%numChannels], pub, streamPosition, false) wg.Wait() } }) @@ -831,7 +831,7 @@ func TestHubBroadcastInappropriateProtocol_Publication(t *testing.T) { } err := n.hub.BroadcastPublication("test_channel", &Publication{ Data: []byte(`{111`), - }, StreamPosition{}) + }, StreamPosition{}, false) require.NoError(t, err) waitWithTimeout(t, done) } diff --git a/internal/redis_lua/broker_history_add_stream.lua b/internal/redis_lua/broker_history_add_stream.lua index ead4af38..22065f33 100644 --- a/internal/redis_lua/broker_history_add_stream.lua +++ b/internal/redis_lua/broker_history_add_stream.lua @@ -36,6 +36,16 @@ if use_delta == "1" then local prev_entries = redis.call("xrevrange", stream_key, "+", "-", "COUNT", 1) if #prev_entries > 0 then prev_message_payload = prev_entries[1][2]["d"] + local fields_and_values = prev_entries[1][2] + -- Loop through the fields and values to find the field "d" + for i = 1, #fields_and_values, 2 do + local field = fields_and_values[i] + local value = fields_and_values[i + 1] + if field == "d" then + prev_message_payload = value + break -- Stop the loop once we find the field "d" + end + end end end diff --git a/node.go b/node.go index 94dd3503..0da4b047 100644 --- a/node.go +++ b/node.go @@ -1085,7 +1085,6 @@ func (n *Node) removeSubscription(ch string, c *Client) error { if empty { cache, ok := n.caches[ch] if ok { - println("close cache") cache.close() delete(n.caches, ch) } @@ -1428,20 +1427,24 @@ func (n *Node) recoverHistory(ch string, since StreamPosition, historyMetaTTL ti // recoverCache recovers last publication in channel. 
func (n *Node) recoverCache(ch string, historyMetaTTL time.Duration) (HistoryResult, error) { n.metrics.incActionCount("history_recover") - if n.caches[ch] == nil { + mu := n.subLock(ch) + mu.Lock() + cache := n.caches[ch] + mu.Unlock() + if cache == nil || !cache.options.KeepLatestPublication { return n.History(ch, WithHistoryFilter(HistoryFilter{ Limit: 1, Reverse: true, }), WithHistoryMetaTTL(historyMetaTTL)) } - if n.caches[ch].prevPublication != nil { + if n.caches[ch].latestPublication != nil { return HistoryResult{ - StreamPosition: n.caches[ch].prevStreamPosition, - Publications: []*Publication{n.caches[ch].prevPublication}, + StreamPosition: n.caches[ch].currentStreamPosition, + Publications: []*Publication{n.caches[ch].latestPublication}, }, nil } return HistoryResult{ - StreamPosition: n.caches[ch].prevStreamPosition, + StreamPosition: n.caches[ch].currentStreamPosition, Publications: nil, }, nil } From 3c02f7fe1a7c87170c030267bc2d6494bb6d09c3 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 22 Apr 2024 21:33:08 +0300 Subject: [PATCH 20/61] experimental channel cache layer --- _examples/compression_playground/main.go | 56 +++---- broker.go | 12 ++ broker_redis.go | 4 + broker_redis_test.go | 15 -- channel_cache.go | 186 +++++++++++++++------- channel_cache_test.go | 192 +++++++++++++++++++++++ client.go | 42 ++--- config.go | 2 - hub.go | 22 +-- metrics.go | 32 ++-- node.go | 129 +++------------ 11 files changed, 435 insertions(+), 257 deletions(-) create mode 100644 channel_cache_test.go diff --git a/_examples/compression_playground/main.go b/_examples/compression_playground/main.go index cebae518..fad6de12 100644 --- a/_examples/compression_playground/main.go +++ b/_examples/compression_playground/main.go @@ -141,38 +141,31 @@ func main() { log.Println(entry.Message, entry.Fields) }, AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, - GetChannelCacheOptions: func(channel string) (centrifuge.ChannelCacheOptions, bool) { - return 
centrifuge.ChannelCacheOptions{ - Delay: 200 * time.Millisecond, - SyncInterval: 10 * time.Millisecond, - KeepLatestPublication: true, - }, true - }, }) if err != nil { log.Fatal(err) } - redisShardConfigs := []centrifuge.RedisShardConfig{ - {Address: "localhost:6379"}, - } - var redisShards []*centrifuge.RedisShard - for _, redisConf := range redisShardConfigs { - redisShard, err := centrifuge.NewRedisShard(node, redisConf) - if err != nil { - log.Fatal(err) - } - redisShards = append(redisShards, redisShard) - } - - broker, err := centrifuge.NewRedisBroker(node, centrifuge.RedisBrokerConfig{ - // And configure a couple of shards to use. - Shards: redisShards, - }) - if err != nil { - log.Fatal(err) - } - node.SetBroker(broker) + //redisShardConfigs := []centrifuge.RedisShardConfig{ + // {Address: "localhost:6379"}, + //} + //var redisShards []*centrifuge.RedisShard + //for _, redisConf := range redisShardConfigs { + // redisShard, err := centrifuge.NewRedisShard(node, redisConf) + // if err != nil { + // log.Fatal(err) + // } + // redisShards = append(redisShards, redisShard) + //} + // + //broker, err := centrifuge.NewRedisBroker(node, centrifuge.RedisBrokerConfig{ + // // And configure a couple of shards to use. 
+ // Shards: redisShards, + //}) + //if err != nil { + // log.Fatal(err) + //} + //node.SetBroker(broker) node.OnConnecting(func(ctx context.Context, event centrifuge.ConnectEvent) (centrifuge.ConnectReply, error) { cred, _ := centrifuge.GetCredentials(ctx) @@ -203,6 +196,11 @@ func main() { useProtobufPayload = true } + go func() { + time.Sleep(500 * time.Millisecond) + client.Disconnect(centrifuge.DisconnectForceReconnect) + }() + go func() { log.Printf("using protobuf payload: %v", useProtobufPayload) simulateMatch(client.Context(), 0, node, useProtobufPayload) @@ -265,8 +263,10 @@ func main() { } go func() { + var num int32 for { - simulateMatch(context.Background(), 0, node, false) + num++ + simulateMatch(context.Background(), num, node, false) } }() diff --git a/broker.go b/broker.go index d1996525..b3f7896c 100644 --- a/broker.go +++ b/broker.go @@ -19,6 +19,18 @@ type Publication struct { Tags map[string]string } +func (p *Publication) shallowCopy() *Publication { + if p == nil { + return nil + } + return &Publication{ + Offset: p.Offset, + Data: p.Data, + Info: p.Info, + Tags: p.Tags, + } +} + // ClientInfo contains information about client connection. type ClientInfo struct { // ClientID is a client unique id. 
diff --git a/broker_redis.go b/broker_redis.go index 93761075..42b5d9b5 100644 --- a/broker_redis.go +++ b/broker_redis.go @@ -710,6 +710,10 @@ func (b *RedisBroker) publish(s *shardWrapper, ch string, data []byte, opts Publ script = b.addHistoryStreamScript } + if opts.UseDelta && b.config.UseLists { + return StreamPosition{}, false, errors.New("delta is not supported when using Redis lists for history") + } + var useDelta string if opts.UseDelta { useDelta = "1" diff --git a/broker_redis_test.go b/broker_redis_test.go index d160166f..57fb9bd5 100644 --- a/broker_redis_test.go +++ b/broker_redis_test.go @@ -766,14 +766,6 @@ func TestRedisExtractPushData(t *testing.T) { require.Equal(t, "xyz.123", sp.Epoch) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) - //data = []byte(`__16901__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - //pushData, pushType, sp, _, _, ok = extractPushData(data) - //require.True(t, ok) - //require.Equal(t, pubPushType, pushType) - //require.Equal(t, uint64(16901), sp.Offset) - //require.Equal(t, "", sp.Epoch) - //require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) - data = []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) pushData, pushType, sp, _, _, ok = extractPushData(data) require.True(t, ok) @@ -781,13 +773,6 @@ func TestRedisExtractPushData(t *testing.T) { require.Equal(t, uint64(0), sp.Offset) require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) - //data = 
[]byte(`__4294967337__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) - //pushData, pushType, sp, _, _, ok = extractPushData(data) - //require.True(t, ok) - //require.Equal(t, pubPushType, pushType) - //require.Equal(t, uint64(4294967337), sp.Offset) - //require.Equal(t, []byte(`\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`), pushData) - data = []byte(`__j__\x12\nchat:index\x1aU\"\x0e{\"input\":\"__\"}*C\n\x0242\x12$37cb00a9-bcfa-4284-a1ae-607c7da3a8f4\x1a\x15{\"name\": \"Alexander\"}\"\x00`) pushData, pushType, sp, _, _, ok = extractPushData(data) require.True(t, ok) diff --git a/channel_cache.go b/channel_cache.go index 7e5d70d9..32365adf 100644 --- a/channel_cache.go +++ b/channel_cache.go @@ -1,43 +1,86 @@ package centrifuge import ( - "github.com/centrifugal/centrifuge/internal/timers" + "errors" + "fmt" "math" "sync" "sync/atomic" "time" + + "github.com/centrifugal/centrifuge/internal/timers" ) +// ChannelCacheOptions is an EXPERIMENTAL way to provide a channel cache layer options to Centrifuge. +// This is very unstable at the moment, do not use in production. type ChannelCacheOptions struct { - // Delay broadcasting. In this case intermediate publications may be skipped. May be used to - // reduce number of messages sent to clients. If zero, then all publications will be sent to clients. - Delay time.Duration + // BroadcastDelay controls delay before Publication broadcast. On time tick Centrifugo broadcasts + // only the latest publication in the channel. Useful to reduce the number of messages sent to clients + // when publication contains the entire state. If zero, all publications will be sent to clients without + // delay logic involved on channel cache level. This option requires (!) UseQueue to be enabled, as we + // can not afford delays during synchronous broadcast. 
+ BroadcastDelay time.Duration + // PositionSyncInterval is a time interval to check if we need to sync stream position state with Broker + // to detect PUB/SUB layer message loss. By default, no sync is performed – in that case each individual + // connection syncs position separately. + // TODO: need a mechanism to communicate with Clients that sync is done in cache layer. + PositionSyncInterval time.Duration + // UseQueue enables queue for incoming publications. This can be useful to reduce PUB/SUB message + // processing time (as we put it into a single cache layer queue) and also opens a road to broadcast + // tweaks – such as BroadcastDelay and delta between several publications (deltas require both + // BroadcastDelay and KeepLatestPublication to be enabled). + UseQueue bool // KeepLatestPublication enables keeping latest publication in channel cache. This is required - // for using deltas when delay > 0. Also, this enables fast recovery after reconnect. + // for supporting deltas when BroadcastDelay > 0. Also, this enables fast recovery after reconnect + // in RecoveryModeCache case. + // TODO: make sure we use cache for fast recovery in RecoveryModeCache case. + // TODO: make sure we use cache for fast recovery in RecoveryModeStream case. KeepLatestPublication bool - // SyncInterval is a time interval to check if we need to sync state with Broker. - // By default, no sync will be done. In this case each individual connection will - // sync separately. - SyncInterval time.Duration } -// channelCache is an optional intermediary layer between broker and client connections. -// It may periodically sync its state with a Broker if there were no new publications for a long time. -// If it finds that a continuity in a channel stream is broken it marks channel subscribers with -// insufficient state flag. -// It may be used by clients to check proper stream position in channel thus drastically reduce -// load on Broker. 
-// It may keep last publication – and with Delay option only send latest publication to clients skipping -// intermediate publications. -// It may also handle delta updates and send deltas (between several publications if Delay is used). +// channelCache is an optional intermediary layer between broker PUB/SUB and client connections. +// It costs up to two additional goroutines depending on ChannelCacheOptions used. +// +// This layer optionally keeps latestPublication in channel (when ChannelCacheOptions.KeepLatestPublication is on) +// and optionally queues incoming publications to process them later (broadcast to active subscribers) in a separate +// goroutine (when ChannelCacheOptions.UseQueue is on). Also, it may have a goroutine for periodic position checks +// (if ChannelCacheOptions.PositionSyncInterval is set to non-zero value). +// +// When ChannelCacheOptions.PositionSyncInterval is used it periodically syncs stream position with a Broker if +// there were no new publications for a long time. If it finds that a continuity in a channel stream is +// broken it marks channel subscribers with insufficient state flag. This way Centrifuge can drastically +// reduce the number of calls to Broker for the mostly idle streams in channels with many subscribers. +// +// When ChannelCacheOptions.KeepLatestPublication is used clients can load latest stream Publication from +// memory instead of remote broker, so connect/reconnect in RecoveryModeCache case is faster and more efficient. +// +// Cache layer may also be used with RecoveryModeStream to only go to the Broker if recovery is not possible +// from the cached state. Thus making quick massive reconnect less expensive. +// +// With ChannelCacheOptions.BroadcastDelay option it can send latest publications to clients skipping intermediate +// publications. Together with ChannelCacheOptions.KeepLatestPublication cache layer can also handle delta +// updates and send deltas between several publications. 
+// +// Cache is dropped as soon as last subscriber leaves the channel on the node. This generally makes it possible to +// keep latest publication without TTL, but probably we still need to handle TTL to match broker behaviour. BTW it's +// possible to clean up the local cache latest publication by looking at the result from a broker in the periodic +// position sync. +// +// When using cache layer we need to make sure that all synchronizations in channel are made through the cache layer. +// Connection may join with an offset in the future – in that case we need to make sure that we don't send publications +// with lower offset to the client. This also affects using delays and deltas - the delta may be broken. +// The question is - what if client reconnects to a node where cache layer is behind another node? Client may pass +// larger offset. What should we do then? Maybe return an insufficient state error to client in that case? type channelCache struct { initialized atomic.Int64 channel string node node options ChannelCacheOptions - mu sync.Mutex - messages *cacheQueue + mu sync.Mutex + + messages *cacheQueue + broadcastMu sync.Mutex // When queue is not used need to protect broadcast method from concurrent execution. closeCh chan struct{} @@ -45,7 +88,7 @@ type channelCache struct { latestPublication *Publication // currentStreamPosition is an initial stream position or stream position lastly sent. currentStreamPosition StreamPosition - + // latestQueuedStreamPosition is a stream position of the latest queued publication. 
latestQueuedStreamPosition StreamPosition positionCheckTime int64 @@ -58,26 +101,31 @@ type node interface { channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool, ) error - History(ch string, opts ...HistoryOption) (HistoryResult, error) + streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) } func newChannelCache( channel string, node node, options ChannelCacheOptions, -) *channelCache { +) (*channelCache, error) { c := &channelCache{ - channel: channel, - node: node, - options: options, - closeCh: make(chan struct{}), - messages: newCacheQueue(2), + channel: channel, + node: node, + options: options, + closeCh: make(chan struct{}), nowTimeGetter: func() time.Time { return time.Now() }, positionCheckTime: time.Now().Unix(), } - return c + if options.UseQueue { + c.messages = newCacheQueue(2) + } + if options.BroadcastDelay > 0 && !options.UseQueue { + return nil, fmt.Errorf("broadcast delay can only be used with queue enabled") + } + return c, nil } type queuedPub struct { @@ -94,52 +142,82 @@ func (c *channelCache) initState(latestPublication *Publication, currentStreamPo } c.currentStreamPosition = currentStreamPosition c.latestQueuedStreamPosition = currentStreamPosition - go c.runChecks() - go c.writer() + if c.options.UseQueue { + go c.writer() + } + if c.options.PositionSyncInterval > 0 { + go c.runChecks() + } c.initialized.Store(1) } -func (c *channelCache) handlePublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { +func (c *channelCache) recoverLatestPublication() (*Publication, StreamPosition, error) { + c.mu.Lock() + defer c.mu.Unlock() + if !c.options.KeepLatestPublication { + return nil, StreamPosition{}, errors.New("keep latest publication option is not enabled") + } + return c.latestPublication.shallowCopy(), c.currentStreamPosition, nil +} + +func (c *channelCache) processPublication(pub *Publication, sp 
StreamPosition, delta bool, prevPub *Publication) { if c.initialized.Load() == 0 { // Skip publications while cache is not initialized. return } bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} c.mu.Lock() - defer c.mu.Unlock() c.latestQueuedStreamPosition = sp c.positionCheckTime = c.nowTimeGetter().Unix() - c.messages.Add(queuedItem{Publication: bp}) + c.mu.Unlock() + + if c.options.UseQueue { + c.messages.Add(queuedItem{Publication: bp}) + // TODO: do we need to limit queue size here? + } else { + c.broadcastMu.Lock() + defer c.broadcastMu.Unlock() + c.broadcast(bp) + } } -func (c *channelCache) handleInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { +func (c *channelCache) processInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} c.mu.Lock() defer c.mu.Unlock() c.latestQueuedStreamPosition = currentStreamTop c.positionCheckTime = c.nowTimeGetter().Unix() - // TODO: possibly c.messages.dropQueued() ? - c.messages.Add(queuedItem{Publication: bp}) + if c.options.UseQueue { + // TODO: possibly support c.messages.dropQueued() for this path ? 
+ c.messages.Add(queuedItem{Publication: bp}) + } else { + c.broadcastMu.Lock() + defer c.broadcastMu.Unlock() + c.broadcast(bp) + } } func (c *channelCache) broadcast(qp queuedPub) { - bypassOffset := c.options.Delay > 0 && !qp.isInsufficientState + bypassOffset := c.options.BroadcastDelay > 0 && !qp.isInsufficientState pubToBroadcast := qp.pub + spToBroadcast := qp.sp if qp.isInsufficientState { pubToBroadcast = &Publication{ Offset: math.MaxUint64, } + spToBroadcast.Offset = math.MaxUint64 } + prevPub := qp.prevPub - if c.options.KeepLatestPublication && c.options.Delay > 0 { + if c.options.KeepLatestPublication && c.options.BroadcastDelay > 0 { prevPub = c.latestPublication } delta := qp.delta - if c.options.Delay > 0 && !c.options.KeepLatestPublication { + if c.options.BroadcastDelay > 0 && !c.options.KeepLatestPublication { delta = false } _ = c.node.handlePublication( - c.channel, pubToBroadcast, qp.sp, delta, prevPub, bypassOffset) + c.channel, pubToBroadcast, spToBroadcast, delta, prevPub, bypassOffset) c.mu.Lock() defer c.mu.Unlock() if qp.sp.Offset > c.currentStreamPosition.Offset { @@ -152,7 +230,7 @@ func (c *channelCache) broadcast(qp queuedPub) { func (c *channelCache) writer() { for { - if ok := c.waitSendPub(c.options.Delay); !ok { + if ok := c.waitSendPub(c.options.BroadcastDelay); !ok { return } } @@ -173,7 +251,6 @@ func (c *channelCache) waitSendPub(delay time.Duration) bool { timers.ReleaseTimer(tm) return false } - timers.ReleaseTimer(tm) } @@ -206,7 +283,7 @@ func (c *channelCache) waitSendPub(delay time.Duration) bool { func (c *channelCache) checkPosition() (*Publication, StreamPosition, bool) { nowUnix := c.nowTimeGetter().Unix() - needCheckPosition := nowUnix-c.positionCheckTime >= int64(c.options.SyncInterval.Seconds()) + needCheckPosition := nowUnix-c.positionCheckTime >= int64(c.options.PositionSyncInterval.Seconds()) if !needCheckPosition { return nil, StreamPosition{}, true @@ -217,18 +294,9 @@ func (c *channelCache) checkPosition() 
(*Publication, StreamPosition, bool) { historyMetaTTL = c.metaTTLSeconds * time.Second } - hr, err := c.node.History(c.channel, WithHistoryFilter(HistoryFilter{ - Limit: 1, - Reverse: true, - }), WithHistoryMetaTTL(historyMetaTTL)) - - currentStreamPosition := hr.StreamPosition - var latestPublication *Publication - if len(hr.Publications) > 0 { - latestPublication = hr.Publications[0] - } + latestPublication, currentStreamPosition, err := c.node.streamTopLatestPub(c.channel, historyMetaTTL) if err != nil { - // Check later. + // Will result into position check later. return nil, StreamPosition{}, true } @@ -249,8 +317,8 @@ func (c *channelCache) isValidPosition(streamTop StreamPosition, nowUnix int64) func (c *channelCache) runChecks() { var syncCh <-chan time.Time - if c.options.SyncInterval > 0 { - ticker := time.NewTicker(c.options.SyncInterval) + if c.options.PositionSyncInterval > 0 { + ticker := time.NewTicker(c.options.PositionSyncInterval) syncCh = ticker.C defer ticker.Stop() } @@ -269,7 +337,7 @@ func (c *channelCache) runChecks() { ) latestPublication, streamTop, validPosition = c.checkPosition() if !validPosition { - c.handleInsufficientState(streamTop, latestPublication) + c.processInsufficientState(streamTop, latestPublication) } } } @@ -309,7 +377,7 @@ func newCacheQueue(initialCapacity int) *cacheQueue { return sq } -// WriteMany mutex must be held when calling +// Mutex must be held when calling. func (q *cacheQueue) resize(n int) { nodes := make([]queuedItem, n) if q.head < q.tail { diff --git a/channel_cache_test.go b/channel_cache_test.go new file mode 100644 index 00000000..12f8f8a4 --- /dev/null +++ b/channel_cache_test.go @@ -0,0 +1,192 @@ +package centrifuge + +import ( + "errors" + "math" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// Helper function to create a channelCache with options. 
+func setupChannelCache(t testing.TB, options ChannelCacheOptions, node node) *channelCache { + t.Helper() + channel := "testChannel" + cache, err := newChannelCache(channel, node, options) + if err != nil { + require.NoError(t, err) + } + return cache +} + +type mockNode struct { + // Store function outputs and any state needed for testing + handlePublicationFunc func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error + streamTopLatestPubFunc func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) +} + +func (m *mockNode) handlePublication(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + if m.handlePublicationFunc != nil { + return m.handlePublicationFunc(channel, pub, sp, delta, prevPublication, bypassOffset) + } + return nil +} + +func (m *mockNode) streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { + if m.streamTopLatestPubFunc != nil { + return m.streamTopLatestPubFunc(ch, historyMetaTTL) + } + return nil, StreamPosition{}, nil +} + +func TestChannelCacheInitialization(t *testing.T) { + options := ChannelCacheOptions{ + UseQueue: true, + KeepLatestPublication: true, + BroadcastDelay: 10 * time.Millisecond, + PositionSyncInterval: 1 * time.Second, + } + cache := setupChannelCache(t, options, &mockNode{}) + + require.NotNil(t, cache) + require.NotNil(t, cache.messages) + require.Equal(t, int64(0), cache.initialized.Load()) + cache.initState(&Publication{}, StreamPosition{1, "epoch"}) + require.Equal(t, int64(1), cache.initialized.Load()) +} + +func TestChannelCacheHandlePublication(t *testing.T) { + optionSet := []ChannelCacheOptions{ + { + UseQueue: false, + KeepLatestPublication: false, + }, + { + UseQueue: true, + KeepLatestPublication: false, + }, + { + UseQueue: true, + KeepLatestPublication: false, + BroadcastDelay: 10 * time.Millisecond, 
+ }, + { + UseQueue: true, + KeepLatestPublication: true, + BroadcastDelay: 10 * time.Millisecond, + }, + } + + for i, options := range optionSet { + t.Run(strconv.Itoa(i), func(t *testing.T) { + doneCh := make(chan struct{}) + + cache := setupChannelCache(t, options, &mockNode{ + handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + close(doneCh) + return nil + }, + }) + cache.initState(&Publication{}, StreamPosition{}) + + pub := &Publication{Data: []byte("test data")} + sp := StreamPosition{Offset: 1} + + cache.processPublication(pub, sp, false, nil) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "handlePublicationFunc was not called") + } + + if options.KeepLatestPublication { + latestPub, latestSP, err := cache.recoverLatestPublication() + require.NoError(t, err) + require.Equal(t, pub, latestPub) + require.Equal(t, sp, latestSP) + } + }) + } +} + +func TestChannelCacheInsufficientState(t *testing.T) { + options := ChannelCacheOptions{ + UseQueue: true, + KeepLatestPublication: true, + } + doneCh := make(chan struct{}) + cache := setupChannelCache(t, options, &mockNode{ + handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + require.Equal(t, uint64(math.MaxUint64), pub.Offset) + require.Equal(t, uint64(math.MaxUint64), sp.Offset) + require.False(t, bypassOffset) + close(doneCh) + return nil + }, + }) + cache.initState(&Publication{}, StreamPosition{}) + + // Simulate the behavior when the state is marked as insufficient + cache.processInsufficientState(StreamPosition{Offset: 2}, &Publication{}) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "handlePublicationFunc was not called") + } +} + +func TestChannelCachePositionSync(t *testing.T) { + options := ChannelCacheOptions{ + 
PositionSyncInterval: 10 * time.Millisecond, + } + doneCh := make(chan struct{}) + var closeOnce sync.Once + cache := setupChannelCache(t, options, &mockNode{ + streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { + closeOnce.Do(func() { + close(doneCh) + }) + return nil, StreamPosition{}, nil + }, + }) + cache.initState(&Publication{}, StreamPosition{}) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "historyFunc was not called") + } +} + +func TestChannelCachePositionSyncRetry(t *testing.T) { + options := ChannelCacheOptions{ + PositionSyncInterval: 10 * time.Millisecond, + } + doneCh := make(chan struct{}) + var closeOnce sync.Once + numCalls := 0 + cache := setupChannelCache(t, options, &mockNode{ + streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { + if numCalls == 0 { + numCalls++ + return nil, StreamPosition{}, errors.New("boom") + } + closeOnce.Do(func() { + close(doneCh) + }) + return nil, StreamPosition{}, nil + }, + }) + cache.initState(&Publication{}, StreamPosition{}) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "historyFunc was not called") + } +} diff --git a/client.go b/client.go index 8b62d9e4..13917c98 100644 --- a/client.go +++ b/client.go @@ -2696,13 +2696,13 @@ func isStreamRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch s return recoveredPubs, recovered } -func isCacheRecovered(historyResult HistoryResult, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) { - latestOffset := historyResult.Offset - latestEpoch := historyResult.Epoch +func isCacheRecovered(latestPub *Publication, currentSP StreamPosition, cmdOffset uint64, cmdEpoch string) ([]*protocol.Publication, bool) { + latestOffset := currentSP.Offset + latestEpoch := currentSP.Epoch var recovered bool - recoveredPubs := make([]*protocol.Publication, 0, 
len(historyResult.Publications)) - if len(historyResult.Publications) > 0 { - publication := historyResult.Publications[0] + recoveredPubs := make([]*protocol.Publication, 0, 1) + if latestPub != nil { + publication := latestPub recovered = publication.Offset == latestOffset skipPublication := cmdOffset > 0 && cmdOffset == latestOffset && cmdEpoch == latestEpoch if recovered && !skipPublication { @@ -2816,28 +2816,28 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep // publications automatically from history (we assume here that the history configured wisely). if recoveryMode == RecoveryModeCache { - historyResult, err := c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) + latestPub, currentSP, err := c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) if err != nil { c.node.logger.log(newLogEntry(LogLevelError, "error on cache recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) return handleErr(err) } - latestOffset = historyResult.Offset - latestEpoch = historyResult.Epoch + latestOffset = currentSP.Offset + latestEpoch = currentSP.Epoch var recovered bool - recoveredPubs, recovered = isCacheRecovered(historyResult, cmdOffset, cmdEpoch) + recoveredPubs, recovered = isCacheRecovered(latestPub, currentSP, cmdOffset, cmdEpoch) res.Recovered = recovered - if len(historyResult.Publications) == 0 && c.eventHub.cacheEmptyHandler != nil { + if latestPub == nil && c.eventHub.cacheEmptyHandler != nil { cacheReply := c.eventHub.cacheEmptyHandler(CacheEmptyEvent{Channel: channel}) if cacheReply.Populated && !recovered { // One more chance to recover in case we know cache was populated. 
- historyResult, err = c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) + latestPub, currentSP, err = c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) if err != nil { c.node.logger.log(newLogEntry(LogLevelError, "error on populated cache recover", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) return handleErr(err) } - latestOffset = historyResult.Offset - latestEpoch = historyResult.Epoch - recoveredPubs, recovered = isCacheRecovered(historyResult, cmdOffset, cmdEpoch) + latestOffset = currentSP.Offset + latestEpoch = currentSP.Epoch + recoveredPubs, recovered = isCacheRecovered(latestPub, currentSP, cmdOffset, cmdEpoch) res.Recovered = recovered c.node.metrics.incRecover(res.Recovered) } else { @@ -3089,7 +3089,7 @@ func (c *Client) handleAsyncUnsubscribe(ch string, unsub Unsubscribe) { } } -func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition, bypassOffset bool) error { +func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition) error { c.mu.Lock() channelContext, ok := c.channels[ch] if !ok || !channelHasFlag(channelContext.flags, flagSubscribed) { @@ -3119,7 +3119,7 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica c.mu.Unlock() return nil } - if !bypassOffset && pubOffset != nextExpectedOffset { + if pubOffset != nextExpectedOffset { if c.node.logger.enabled(LogLevelDebug) { c.node.logger.log(newLogEntry(LogLevelDebug, "client insufficient state", map[string]any{"channel": ch, "user": c.user, "client": c.uid, "offset": pubOffset, "expectedOffset": nextExpectedOffset})) } @@ -3149,11 +3149,11 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica return c.transportEnqueue(data.data, ch, protocol.FrameTypePushPublication) } -func (c *Client) writePublicationNoDelta(ch string, pub *protocol.Publication, data 
[]byte, sp StreamPosition, bypassOffset bool) error { - return c.writePublication(ch, pub, dataValue{data: data, deltaData: data}, sp, bypassOffset) +func (c *Client) writePublicationNoDelta(ch string, pub *protocol.Publication, data []byte, sp StreamPosition) error { + return c.writePublication(ch, pub, dataValue{data: data, deltaData: data}, sp) } -func (c *Client) writePublication(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition, bypassOffset bool) error { +func (c *Client) writePublication(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition) error { if c.node.LogEnabled(LogLevelTrace) { c.traceOutPush(&protocol.Push{Channel: ch, Pub: pub}) } @@ -3164,7 +3164,7 @@ func (c *Client) writePublication(ch string, pub *protocol.Publication, data dat return c.transportEnqueue(data.data, ch, protocol.FrameTypePushPublication) } c.pubSubSync.SyncPublication(ch, pub, func() { - _ = c.writePublicationUpdatePosition(ch, pub, data, sp, bypassOffset) + _ = c.writePublicationUpdatePosition(ch, pub, data, sp) }) return nil } diff --git a/config.go b/config.go index 964990b4..812099a6 100644 --- a/config.go +++ b/config.go @@ -112,8 +112,6 @@ type Config struct { // AllowedDeltaTypes is a whitelist of DeltaType subscribers can use. At this point Centrifuge // only supports DeltaTypeFossil. If not set clients won't be able to negotiate delta encoding. AllowedDeltaTypes []DeltaType - - GetChannelCacheOptions func(channel string) (ChannelCacheOptions, bool) } const ( diff --git a/hub.go b/hub.go index 4d620291..9196278a 100644 --- a/hub.go +++ b/hub.go @@ -134,12 +134,12 @@ func (h *Hub) removeSub(ch string, c *Client) (bool, error) { // uses a Broker to deliver publications to all Nodes in a cluster and maintains publication history // in a channel with incremental offset. By calling BroadcastPublication messages will only be sent // to the current node subscribers without any defined offset semantics. 
-func (h *Hub) BroadcastPublication(ch string, pub *Publication, sp StreamPosition, bypassOffset bool) error { - return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, pubToProto(pub), sp, bypassOffset) +func (h *Hub) BroadcastPublication(ch string, pub *Publication, sp StreamPosition) error { + return h.subShards[index(ch, numHubShards)].broadcastPublication(ch, pubToProto(pub), sp) } -func (h *Hub) broadcastPublicationDelta(ch string, pub *Publication, prevPub *Publication, sp StreamPosition, bypassOffset bool) error { - return h.subShards[index(ch, numHubShards)].broadcastPublicationDelta(ch, pub, prevPub, sp, bypassOffset) +func (h *Hub) broadcastPublicationDelta(ch string, pub *Publication, prevPub *Publication, sp StreamPosition) error { + return h.subShards[index(ch, numHubShards)].broadcastPublicationDelta(ch, pub, prevPub, sp) } // broadcastJoin sends message to all clients subscribed on channel. @@ -569,7 +569,7 @@ type dataValue struct { } // broadcastPublicationDelta sends message to all clients subscribed on channel trying to use deltas. -func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, prevPub *Publication, sp StreamPosition, bypassOffset bool) error { +func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, prevPub *Publication, sp StreamPosition) error { fullPub := pubToProto(pub) dataByKey := make(map[broadcastKey]dataValue) @@ -726,7 +726,7 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client) continue } - _ = sub.client.writePublication(channel, fullPub, value, sp, bypassOffset) + _ = sub.client.writePublication(channel, fullPub, value, sp) } if jsonEncodeErr != nil && h.logger.enabled(LogLevelWarn) { // Log that we had clients with inappropriate protocol, and point to the first such client. 
@@ -741,7 +741,7 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p } // broadcastPublication sends message to all clients subscribed on channel. -func (h *subShard) broadcastPublication(channel string, pub *protocol.Publication, sp StreamPosition, bypassOffset bool) error { +func (h *subShard) broadcastPublication(channel string, pub *protocol.Publication, sp StreamPosition) error { h.mu.RLock() defer h.mu.RUnlock() @@ -778,7 +778,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio continue } } - _ = sub.client.writePublicationNoDelta(channel, pub, jsonPush, sp, bypassOffset) + _ = sub.client.writePublicationNoDelta(channel, pub, jsonPush, sp) } else { if jsonReply == nil { push := &protocol.Push{Channel: channel, Pub: pub} @@ -790,7 +790,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio continue } } - _ = sub.client.writePublicationNoDelta(channel, pub, jsonReply, sp, bypassOffset) + _ = sub.client.writePublicationNoDelta(channel, pub, jsonReply, sp) } } else if protoType == protocol.TypeProtobuf { if sub.client.transport.Unidirectional() { @@ -802,7 +802,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio return err } } - _ = sub.client.writePublicationNoDelta(channel, pub, protobufPush, sp, bypassOffset) + _ = sub.client.writePublicationNoDelta(channel, pub, protobufPush, sp) } else { if protobufReply == nil { push := &protocol.Push{Channel: channel, Pub: pub} @@ -812,7 +812,7 @@ func (h *subShard) broadcastPublication(channel string, pub *protocol.Publicatio return err } } - _ = sub.client.writePublicationNoDelta(channel, pub, protobufReply, sp, bypassOffset) + _ = sub.client.writePublicationNoDelta(channel, pub, protobufReply, sp) } } } diff --git a/metrics.go b/metrics.go index 4359a953..ff15a1ff 100644 --- a/metrics.go +++ b/metrics.go @@ -47,20 +47,21 @@ type metrics struct { messagesSentCountLeave prometheus.Counter 
messagesSentCountControl prometheus.Counter - actionCountAddClient prometheus.Counter - actionCountRemoveClient prometheus.Counter - actionCountAddSub prometheus.Counter - actionCountRemoveSub prometheus.Counter - actionCountAddPresence prometheus.Counter - actionCountRemovePresence prometheus.Counter - actionCountPresence prometheus.Counter - actionCountPresenceStats prometheus.Counter - actionCountHistory prometheus.Counter - actionCountHistoryRecover prometheus.Counter - actionCountHistoryStreamTop prometheus.Counter - actionCountHistoryRemove prometheus.Counter - actionCountSurvey prometheus.Counter - actionCountNotify prometheus.Counter + actionCountAddClient prometheus.Counter + actionCountRemoveClient prometheus.Counter + actionCountAddSub prometheus.Counter + actionCountRemoveSub prometheus.Counter + actionCountAddPresence prometheus.Counter + actionCountRemovePresence prometheus.Counter + actionCountPresence prometheus.Counter + actionCountPresenceStats prometheus.Counter + actionCountHistory prometheus.Counter + actionCountHistoryRecover prometheus.Counter + actionCountHistoryStreamTop prometheus.Counter + actionCountHistoryStreamTopLatestPub prometheus.Counter + actionCountHistoryRemove prometheus.Counter + actionCountSurvey prometheus.Counter + actionCountNotify prometheus.Counter recoverCountYes prometheus.Counter recoverCountNo prometheus.Counter @@ -283,6 +284,8 @@ func (m *metrics) incActionCount(action string) { m.actionCountHistoryRecover.Inc() case "history_stream_top": m.actionCountHistoryStreamTop.Inc() + case "history_stream_top_latest_pub": + m.actionCountHistoryStreamTopLatestPub.Inc() case "history_remove": m.actionCountHistoryRemove.Inc() case "survey": @@ -465,6 +468,7 @@ func initMetricsRegistry(registry prometheus.Registerer, metricsNamespace string m.actionCountHistory = m.actionCount.WithLabelValues("history") m.actionCountHistoryRecover = m.actionCount.WithLabelValues("history_recover") m.actionCountHistoryStreamTop = 
m.actionCount.WithLabelValues("history_stream_top") + m.actionCountHistoryStreamTopLatestPub = m.actionCount.WithLabelValues("history_stream_top_latest_pub") m.actionCountHistoryRemove = m.actionCount.WithLabelValues("history_remove") m.actionCountSurvey = m.actionCount.WithLabelValues("survey") m.actionCountNotify = m.actionCount.WithLabelValues("notify") diff --git a/node.go b/node.go index 0da4b047..ea782be3 100644 --- a/node.go +++ b/node.go @@ -83,8 +83,6 @@ type Node struct { nodeInfoSendHandler NodeInfoSendHandler emulationSurveyHandler *emulationSurveyHandler - - caches map[string]*channelCache } const ( @@ -164,7 +162,6 @@ func New(c Config) (*Node, error) { subDissolver: dissolve.New(numSubDissolverWorkers), nowTimeGetter: nowtime.Get, surveyRegistry: make(map[uint64]chan survey), - caches: make(map[string]*channelCache), } n.emulationSurveyHandler = newEmulationSurveyHandler(n) @@ -686,7 +683,7 @@ func (n *Node) handleControl(data []byte) error { // handlePublication handles messages published into channel and // coming from Broker. The goal of method is to deliver this message // to all clients on this node currently subscribed to channel. 
-func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication, bypassOffset bool) error { +func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { n.metrics.incMessagesReceived("publication") numSubscribers := n.hub.NumSubscribers(ch) hasCurrentSubscribers := numSubscribers > 0 @@ -694,26 +691,13 @@ func (n *Node) handlePublication(ch string, pub *Publication, sp StreamPosition, return nil } if delta { - err := n.hub.broadcastPublicationDelta(ch, pub, prevPub, sp, bypassOffset) + err := n.hub.broadcastPublicationDelta(ch, pub, prevPub, sp) if err != nil { n.Log(newLogEntry(LogLevelError, "error broadcast delta", map[string]any{"error": err.Error()})) } return err } - return n.hub.BroadcastPublication(ch, pub, sp, bypassOffset) -} - -func (n *Node) handlePublicationCached(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { - mu := n.subLock(ch) - mu.Lock() - cache, ok := n.caches[ch] - if ok { - cache.handlePublication(pub, sp, delta, prevPub) - mu.Unlock() - return nil - } - mu.Unlock() - return n.handlePublication(ch, pub, sp, delta, prevPub, false) + return n.hub.BroadcastPublication(ch, pub, sp) } // handleJoin handles join messages - i.e. broadcasts it to @@ -1004,69 +988,11 @@ func (n *Node) addSubscription(ch string, sub subInfo) error { return err } if first { - // TODO: there is a gap between subscribe and cache initialization. - // Need to synchronize. - if n.config.GetChannelCacheOptions != nil { - cacheOpts, ok := n.config.GetChannelCacheOptions(ch) - if ok { - chCache := newChannelCache(ch, n, cacheOpts) - n.caches[ch] = chCache - } - } - err := n.broker.Subscribe(ch) if err != nil { _, _ = n.hub.removeSub(ch, sub.client) - if n.config.GetChannelCacheOptions != nil { - delete(n.caches, ch) - } return err } - - // TODO: there is a gap between subscribe and cache initialization. 
- // Need to synchronize. - if n.config.GetChannelCacheOptions != nil { - cache, ok := n.caches[ch] - if ok { - hr, err := n.History(ch, WithHistoryFilter(HistoryFilter{ - Limit: 1, - Reverse: true, - })) - if err != nil { - _, _ = n.hub.removeSub(ch, sub.client) - - // TODO: eliminate code duplication. - submittedAt := time.Now() - _ = n.subDissolver.Submit(func() error { - timeSpent := time.Since(submittedAt) - if timeSpent < time.Second { - time.Sleep(time.Second - timeSpent) - } - mu := n.subLock(ch) - mu.Lock() - defer mu.Unlock() - empty := n.hub.NumSubscribers(ch) == 0 - if empty { - err := n.broker.Unsubscribe(ch) - if err != nil { - // Cool down a bit since broker is not ready to process unsubscription. - time.Sleep(500 * time.Millisecond) - } - return err - } - return nil - }) - - return err - } - currentStreamPosition := hr.StreamPosition - var latestPublication *Publication - if len(hr.Publications) > 0 { - latestPublication = hr.Publications[0] - } - cache.initState(latestPublication, currentStreamPosition) - } - } } return nil } @@ -1083,12 +1009,6 @@ func (n *Node) removeSubscription(ch string, c *Client) error { return err } if empty { - cache, ok := n.caches[ch] - if ok { - cache.close() - delete(n.caches, ch) - } - submittedAt := time.Now() _ = n.subDissolver.Submit(func() error { timeSpent := time.Since(submittedAt) @@ -1425,28 +1345,26 @@ func (n *Node) recoverHistory(ch string, since StreamPosition, historyMetaTTL ti } // recoverCache recovers last publication in channel. 
-func (n *Node) recoverCache(ch string, historyMetaTTL time.Duration) (HistoryResult, error) { +func (n *Node) recoverCache(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { n.metrics.incActionCount("history_recover") - mu := n.subLock(ch) - mu.Lock() - cache := n.caches[ch] - mu.Unlock() - if cache == nil || !cache.options.KeepLatestPublication { - return n.History(ch, WithHistoryFilter(HistoryFilter{ - Limit: 1, - Reverse: true, - }), WithHistoryMetaTTL(historyMetaTTL)) - } - if n.caches[ch].latestPublication != nil { - return HistoryResult{ - StreamPosition: n.caches[ch].currentStreamPosition, - Publications: []*Publication{n.caches[ch].latestPublication}, - }, nil + return n.streamTopLatestPub(ch, historyMetaTTL) +} + +// streamTopLatestPub returns latest publication in channel with actual stream position. +func (n *Node) streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { + n.metrics.incActionCount("history_stream_top_latest_pub") + hr, err := n.History(ch, WithHistoryFilter(HistoryFilter{ + Limit: 1, + Reverse: true, + }), WithHistoryMetaTTL(historyMetaTTL)) + if err != nil { + return nil, StreamPosition{}, err } - return HistoryResult{ - StreamPosition: n.caches[ch].currentStreamPosition, - Publications: nil, - }, nil + var latestPublication *Publication + if len(hr.Publications) > 0 { + latestPublication = hr.Publications[0] + } + return latestPublication, hr.StreamPosition, nil } // streamTop returns current stream top StreamPosition for a channel. 
@@ -1633,10 +1551,7 @@ func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp S if pub == nil { panic("nil Publication received, this must never happen") } - if h.node.config.GetChannelCacheOptions != nil { - return h.node.handlePublicationCached(ch, pub, sp, delta, prevPub) - } - return h.node.handlePublication(ch, pub, sp, delta, prevPub, false) + return h.node.handlePublication(ch, pub, sp, delta, prevPub) } // HandleJoin coming from Broker. From a70fff28773ca71a99f13605844689706759fa61 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 22 Apr 2024 21:41:24 +0300 Subject: [PATCH 21/61] various fixes --- broker.go | 12 - broker_redis.go | 2 +- channel_cache.go | 530 ----------------------------------------- channel_cache_test.go | 192 --------------- client_experimental.go | 8 +- client_test.go | 2 +- hub_test.go | 10 +- 7 files changed, 11 insertions(+), 745 deletions(-) delete mode 100644 channel_cache.go delete mode 100644 channel_cache_test.go diff --git a/broker.go b/broker.go index b3f7896c..d1996525 100644 --- a/broker.go +++ b/broker.go @@ -19,18 +19,6 @@ type Publication struct { Tags map[string]string } -func (p *Publication) shallowCopy() *Publication { - if p == nil { - return nil - } - return &Publication{ - Offset: p.Offset, - Data: p.Data, - Info: p.Info, - Tags: p.Tags, - } -} - // ClientInfo contains information about client connection. type ClientInfo struct { // ClientID is a client unique id. 
diff --git a/broker_redis.go b/broker_redis.go index 42b5d9b5..afa67cf8 100644 --- a/broker_redis.go +++ b/broker_redis.go @@ -1308,7 +1308,7 @@ func extractPushData(data []byte) ([]byte, pushType, StreamPosition, bool, []byt } content := data[len(metaSep):] - if len(content) < 0 { + if len(content) == 0 { return data, pubPushType, StreamPosition{Epoch: epoch, Offset: offset}, false, nil, false } diff --git a/channel_cache.go b/channel_cache.go deleted file mode 100644 index 32365adf..00000000 --- a/channel_cache.go +++ /dev/null @@ -1,530 +0,0 @@ -package centrifuge - -import ( - "errors" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/centrifugal/centrifuge/internal/timers" -) - -// ChannelCacheOptions is an EXPERIMENTAL way to provide a channel cache layer options to Centrifuge. -// This is very unstable at the moment, do not use in production. -type ChannelCacheOptions struct { - // BroadcastDelay controls delay before Publication broadcast. On time tick Centrifugo broadcasts - // only the latest publication in the channel. Useful to reduce the number of messages sent to clients - // when publication contains the entire state. If zero, all publications will be sent to clients without - // delay logic involved on channel cache level. This option requires (!) UseQueue to be enabled, as we - // can not afford delays during synchronous broadcast. - BroadcastDelay time.Duration - // PositionSyncInterval is a time interval to check if we need to sync stream position state with Broker - // to detect PUB/SUB layer message loss. By default, no sync is performed – in that case each individual - // connection syncs position separately. - // TODO: need a mechanism to communicate with Clients that sync is done in cache layer. - PositionSyncInterval time.Duration - // UseQueue enables queue for incoming publications. 
This can be useful to reduce PUB/SUB message - // processing time (as we put it into a single cache layer queue) and also opens a road to broadcast - // tweaks – such as BroadcastDelay and delta between several publications (deltas require both - // BroadcastDelay and KeepLatestPublication to be enabled). - UseQueue bool - // KeepLatestPublication enables keeping latest publication in channel cache. This is required - // for supporting deltas when BroadcastDelay > 0. Also, this enables fast recovery after reconnect - // in RecoveryModeCache case. - // TODO: make sure we use cache for fast recovery in RecoveryModeCache case. - // TODO: make sure we use cache for fast recovery in RecoveryModeStream case. - KeepLatestPublication bool -} - -// channelCache is an optional intermediary layer between broker PUB/SUB and client connections. -// It costs up to two additional goroutines depending on ChannelCacheOptions used. -// -// This layer optionally keeps latestPublication in channel (when ChannelCacheOptions.KeepLatestPublication is on) -// and optionally queues incoming publications to process them later (broadcast to active subscribers) in a separate -// goroutine (when ChannelCacheOptions.UseQueue is on). Also, it may have a goroutine for periodic position checks -// (if ChannelCacheOptions.PositionSyncInterval is set to non-zero value). -// -// When ChannelCacheOptions.PositionSyncInterval is used it periodically syncs stream position with a Broker if -// there were no new publications for a long time. If it finds that a continuity in a channel stream is -// broken it marks channel subscribers with insufficient state flag. This way Centrifuge can drastically -// reduce the number of calls to Broker for the mostly idle streams in channels with many subscribers. 
-// -// When ChannelCacheOptions.KeepLatestPublication is used clients can load latest stream Publication from -// memory instead of remote broker, so connect/reconnect in RecoveryModeCache case is faster and more efficient. -// -// Cache layer may also be used with RecoveryModeStream to only go to the Broker if recovery is not possible -// from the cached state. Thus making quick massive reconnect less expensive. -// -// With ChannelCacheOptions.BroadcastDelay option it can send latest publications to clients skipping intermediate -// publications. Together with ChannelCacheOptions.KeepLatestPublication cache layer can also handle delta -// updates and send deltas between several publications. -// -// Cache is dropped as soon as last subscriber leaves the channel on the node. This generally makes it possible to -// keep latest publication without TTL, but probably we still need to handle TTL to match broker behaviour. BTW it's -// possible to clean up the local cache latest publication by looking at the result from a broker in the periodic -// position sync. -// -// When using cache layer we need to make sure that all synchronizations in channel are made through the cache layer. -// Connection may join with an offset in the future – in that case we need to make sure that we don't send publications -// with lower offset to the client. This also affects using delays and deltas - the delta may be broken. -// The question is - what if client reconnects to a node where cache layer is behind another node? Client may pass -// larger offset. What should we do then? Maybe return an insufficient state error to client in that case? -type channelCache struct { - initialized atomic.Int64 - channel string - node node - options ChannelCacheOptions - - mu sync.Mutex - - messages *cacheQueue - broadcastMu sync.Mutex // When queue is not used need to protect broadcast method from concurrent execution. 
- - closeCh chan struct{} - - // latestPublication is an initial publication in channel or publication last sent. - latestPublication *Publication - // currentStreamPosition is an initial stream position or stream position lastly sent. - currentStreamPosition StreamPosition - // latestQueuedStreamPosition is a stream position of the latest queued publication. - latestQueuedStreamPosition StreamPosition - - positionCheckTime int64 - nowTimeGetter func() time.Time - metaTTLSeconds time.Duration // TODO: not used yet -} - -type node interface { - handlePublication( - channel string, pub *Publication, sp StreamPosition, delta bool, - prevPublication *Publication, bypassOffset bool, - ) error - streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) -} - -func newChannelCache( - channel string, - node node, - options ChannelCacheOptions, -) (*channelCache, error) { - c := &channelCache{ - channel: channel, - node: node, - options: options, - closeCh: make(chan struct{}), - nowTimeGetter: func() time.Time { - return time.Now() - }, - positionCheckTime: time.Now().Unix(), - } - if options.UseQueue { - c.messages = newCacheQueue(2) - } - if options.BroadcastDelay > 0 && !options.UseQueue { - return nil, fmt.Errorf("broadcast delay can only be used with queue enabled") - } - return c, nil -} - -type queuedPub struct { - pub *Publication - sp StreamPosition - delta bool - prevPub *Publication - isInsufficientState bool -} - -func (c *channelCache) initState(latestPublication *Publication, currentStreamPosition StreamPosition) { - if c.options.KeepLatestPublication { - c.latestPublication = latestPublication - } - c.currentStreamPosition = currentStreamPosition - c.latestQueuedStreamPosition = currentStreamPosition - if c.options.UseQueue { - go c.writer() - } - if c.options.PositionSyncInterval > 0 { - go c.runChecks() - } - c.initialized.Store(1) -} - -func (c *channelCache) recoverLatestPublication() (*Publication, StreamPosition, 
error) { - c.mu.Lock() - defer c.mu.Unlock() - if !c.options.KeepLatestPublication { - return nil, StreamPosition{}, errors.New("keep latest publication option is not enabled") - } - return c.latestPublication.shallowCopy(), c.currentStreamPosition, nil -} - -func (c *channelCache) processPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { - if c.initialized.Load() == 0 { - // Skip publications while cache is not initialized. - return - } - bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} - c.mu.Lock() - c.latestQueuedStreamPosition = sp - c.positionCheckTime = c.nowTimeGetter().Unix() - c.mu.Unlock() - - if c.options.UseQueue { - c.messages.Add(queuedItem{Publication: bp}) - // TODO: do we need to limit queue size here? - } else { - c.broadcastMu.Lock() - defer c.broadcastMu.Unlock() - c.broadcast(bp) - } -} - -func (c *channelCache) processInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { - bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} - c.mu.Lock() - defer c.mu.Unlock() - c.latestQueuedStreamPosition = currentStreamTop - c.positionCheckTime = c.nowTimeGetter().Unix() - if c.options.UseQueue { - // TODO: possibly support c.messages.dropQueued() for this path ? 
- c.messages.Add(queuedItem{Publication: bp}) - } else { - c.broadcastMu.Lock() - defer c.broadcastMu.Unlock() - c.broadcast(bp) - } -} - -func (c *channelCache) broadcast(qp queuedPub) { - bypassOffset := c.options.BroadcastDelay > 0 && !qp.isInsufficientState - pubToBroadcast := qp.pub - spToBroadcast := qp.sp - if qp.isInsufficientState { - pubToBroadcast = &Publication{ - Offset: math.MaxUint64, - } - spToBroadcast.Offset = math.MaxUint64 - } - - prevPub := qp.prevPub - if c.options.KeepLatestPublication && c.options.BroadcastDelay > 0 { - prevPub = c.latestPublication - } - delta := qp.delta - if c.options.BroadcastDelay > 0 && !c.options.KeepLatestPublication { - delta = false - } - _ = c.node.handlePublication( - c.channel, pubToBroadcast, spToBroadcast, delta, prevPub, bypassOffset) - c.mu.Lock() - defer c.mu.Unlock() - if qp.sp.Offset > c.currentStreamPosition.Offset { - c.currentStreamPosition = qp.sp - if c.options.KeepLatestPublication { - c.latestPublication = qp.pub - } - } -} - -func (c *channelCache) writer() { - for { - if ok := c.waitSendPub(c.options.BroadcastDelay); !ok { - return - } - } -} - -func (c *channelCache) waitSendPub(delay time.Duration) bool { - // Wait for message from the queue. 
- ok := c.messages.Wait() - if !ok { - return false - } - - if delay > 0 { - tm := timers.AcquireTimer(delay) - select { - case <-tm.C: - case <-c.closeCh: - timers.ReleaseTimer(tm) - return false - } - timers.ReleaseTimer(tm) - } - - msg, ok := c.messages.Remove() - if !ok { - return !c.messages.Closed() - } - if delay == 0 || msg.Publication.isInsufficientState { - c.broadcast(msg.Publication) - return true - } - messageCount := c.messages.Len() - for messageCount > 0 { - messageCount-- - var ok bool - msg, ok = c.messages.Remove() - if !ok { - if c.messages.Closed() { - return false - } - break - } - if msg.Publication.isInsufficientState { - break - } - } - c.broadcast(msg.Publication) - return true -} - -func (c *channelCache) checkPosition() (*Publication, StreamPosition, bool) { - nowUnix := c.nowTimeGetter().Unix() - needCheckPosition := nowUnix-c.positionCheckTime >= int64(c.options.PositionSyncInterval.Seconds()) - - if !needCheckPosition { - return nil, StreamPosition{}, true - } - - var historyMetaTTL time.Duration - if c.metaTTLSeconds > 0 { - historyMetaTTL = c.metaTTLSeconds * time.Second - } - - latestPublication, currentStreamPosition, err := c.node.streamTopLatestPub(c.channel, historyMetaTTL) - if err != nil { - // Will result into position check later. 
- return nil, StreamPosition{}, true - } - - return latestPublication, currentStreamPosition, c.isValidPosition(currentStreamPosition, nowUnix) -} - -func (c *channelCache) isValidPosition(streamTop StreamPosition, nowUnix int64) bool { - c.mu.Lock() - defer c.mu.Unlock() - position := c.latestQueuedStreamPosition - isValidPosition := streamTop.Epoch == position.Epoch && position.Offset >= streamTop.Offset - if isValidPosition { - c.positionCheckTime = nowUnix - return true - } - return false -} - -func (c *channelCache) runChecks() { - var syncCh <-chan time.Time - if c.options.PositionSyncInterval > 0 { - ticker := time.NewTicker(c.options.PositionSyncInterval) - syncCh = ticker.C - defer ticker.Stop() - } - for { - select { - case <-c.closeCh: - return - case <-syncCh: - // Sync state with Broker. - _, _, validPosition := c.checkPosition() - if !validPosition { - // One retry. - var ( - latestPublication *Publication - streamTop StreamPosition - ) - latestPublication, streamTop, validPosition = c.checkPosition() - if !validPosition { - c.processInsufficientState(streamTop, latestPublication) - } - } - } - } -} - -func (c *channelCache) close() { - close(c.closeCh) -} - -type queuedItem struct { - Publication queuedPub -} - -// cacheQueue is an unbounded queue of queuedItem. -// The queue is goroutine safe. -// Inspired by http://blog.dubbelboer.com/2015/04/25/go-faster-queue.html (MIT) -type cacheQueue struct { - mu sync.RWMutex - cond *sync.Cond - nodes []queuedItem - head int - tail int - cnt int - size int - closed bool - initCap int -} - -// newCacheQueue returns a new queuedItem queue with initial capacity. -func newCacheQueue(initialCapacity int) *cacheQueue { - sq := &cacheQueue{ - initCap: initialCapacity, - nodes: make([]queuedItem, initialCapacity), - } - sq.cond = sync.NewCond(&sq.mu) - return sq -} - -// Mutex must be held when calling. 
-func (q *cacheQueue) resize(n int) { - nodes := make([]queuedItem, n) - if q.head < q.tail { - copy(nodes, q.nodes[q.head:q.tail]) - } else { - copy(nodes, q.nodes[q.head:]) - copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.tail]) - } - - q.tail = q.cnt % n - q.head = 0 - q.nodes = nodes -} - -// Add an queuedItem to the back of the queue -// will return false if the queue is closed. -// In that case the queuedItem is dropped. -func (q *cacheQueue) Add(i queuedItem) bool { - q.mu.Lock() - if q.closed { - q.mu.Unlock() - return false - } - if q.cnt == len(q.nodes) { - // Also tested a growth rate of 1.5, see: http://stackoverflow.com/questions/2269063/buffer-growth-strategy - // In Go this resulted in a higher memory usage. - q.resize(q.cnt * 2) - } - q.nodes[q.tail] = i - q.tail = (q.tail + 1) % len(q.nodes) - if i.Publication.pub != nil { - q.size += len(i.Publication.pub.Data) - } - q.cnt++ - q.cond.Signal() - q.mu.Unlock() - return true -} - -// Close the queue and discard all entries in the queue -// all goroutines in wait() will return -func (q *cacheQueue) Close() { - q.mu.Lock() - defer q.mu.Unlock() - q.closed = true - q.cnt = 0 - q.nodes = nil - q.size = 0 - q.cond.Broadcast() -} - -// CloseRemaining will close the queue and return all entries in the queue. -// All goroutines in wait() will return. -func (q *cacheQueue) CloseRemaining() []queuedItem { - q.mu.Lock() - defer q.mu.Unlock() - if q.closed { - return []queuedItem{} - } - rem := make([]queuedItem, 0, q.cnt) - for q.cnt > 0 { - i := q.nodes[q.head] - q.head = (q.head + 1) % len(q.nodes) - q.cnt-- - rem = append(rem, i) - } - q.closed = true - q.cnt = 0 - q.nodes = nil - q.size = 0 - q.cond.Broadcast() - return rem -} - -// Closed returns true if the queue has been closed -// The call cannot guarantee that the queue hasn't been -// closed while the function returns, so only "true" has a definite meaning. 
-func (q *cacheQueue) Closed() bool { - q.mu.RLock() - c := q.closed - q.mu.RUnlock() - return c -} - -// Wait for a message to be added. -// If there are items on the queue will return immediately. -// Will return false if the queue is closed. -// Otherwise, returns true. -func (q *cacheQueue) Wait() bool { - q.mu.Lock() - if q.closed { - q.mu.Unlock() - return false - } - if q.cnt != 0 { - q.mu.Unlock() - return true - } - q.cond.Wait() - q.mu.Unlock() - return true -} - -// Remove will remove an queuedItem from the queue. -// If false is returned, it either means 1) there were no items on the queue -// or 2) the queue is closed. -func (q *cacheQueue) Remove() (queuedItem, bool) { - q.mu.Lock() - if q.cnt == 0 { - q.mu.Unlock() - return queuedItem{}, false - } - i := q.nodes[q.head] - q.head = (q.head + 1) % len(q.nodes) - q.cnt-- - if i.Publication.pub != nil { - q.size -= len(i.Publication.pub.Data) - } - - if n := len(q.nodes) / 2; n >= q.initCap && q.cnt <= n { - q.resize(n) - } - - q.mu.Unlock() - return i, true -} - -// Cap returns the capacity (without allocations) -func (q *cacheQueue) Cap() int { - q.mu.RLock() - c := cap(q.nodes) - q.mu.RUnlock() - return c -} - -// Len returns the current length of the queue. -func (q *cacheQueue) Len() int { - q.mu.RLock() - l := q.cnt - q.mu.RUnlock() - return l -} - -// Size returns the current size of the queue. -func (q *cacheQueue) Size() int { - q.mu.RLock() - s := q.size - q.mu.RUnlock() - return s -} diff --git a/channel_cache_test.go b/channel_cache_test.go deleted file mode 100644 index 12f8f8a4..00000000 --- a/channel_cache_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package centrifuge - -import ( - "errors" - "math" - "strconv" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -// Helper function to create a channelCache with options. 
-func setupChannelCache(t testing.TB, options ChannelCacheOptions, node node) *channelCache { - t.Helper() - channel := "testChannel" - cache, err := newChannelCache(channel, node, options) - if err != nil { - require.NoError(t, err) - } - return cache -} - -type mockNode struct { - // Store function outputs and any state needed for testing - handlePublicationFunc func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error - streamTopLatestPubFunc func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) -} - -func (m *mockNode) handlePublication(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { - if m.handlePublicationFunc != nil { - return m.handlePublicationFunc(channel, pub, sp, delta, prevPublication, bypassOffset) - } - return nil -} - -func (m *mockNode) streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { - if m.streamTopLatestPubFunc != nil { - return m.streamTopLatestPubFunc(ch, historyMetaTTL) - } - return nil, StreamPosition{}, nil -} - -func TestChannelCacheInitialization(t *testing.T) { - options := ChannelCacheOptions{ - UseQueue: true, - KeepLatestPublication: true, - BroadcastDelay: 10 * time.Millisecond, - PositionSyncInterval: 1 * time.Second, - } - cache := setupChannelCache(t, options, &mockNode{}) - - require.NotNil(t, cache) - require.NotNil(t, cache.messages) - require.Equal(t, int64(0), cache.initialized.Load()) - cache.initState(&Publication{}, StreamPosition{1, "epoch"}) - require.Equal(t, int64(1), cache.initialized.Load()) -} - -func TestChannelCacheHandlePublication(t *testing.T) { - optionSet := []ChannelCacheOptions{ - { - UseQueue: false, - KeepLatestPublication: false, - }, - { - UseQueue: true, - KeepLatestPublication: false, - }, - { - UseQueue: true, - KeepLatestPublication: false, - BroadcastDelay: 10 * time.Millisecond, 
- }, - { - UseQueue: true, - KeepLatestPublication: true, - BroadcastDelay: 10 * time.Millisecond, - }, - } - - for i, options := range optionSet { - t.Run(strconv.Itoa(i), func(t *testing.T) { - doneCh := make(chan struct{}) - - cache := setupChannelCache(t, options, &mockNode{ - handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { - close(doneCh) - return nil - }, - }) - cache.initState(&Publication{}, StreamPosition{}) - - pub := &Publication{Data: []byte("test data")} - sp := StreamPosition{Offset: 1} - - cache.processPublication(pub, sp, false, nil) - - select { - case <-doneCh: - case <-time.After(5 * time.Second): - require.Fail(t, "handlePublicationFunc was not called") - } - - if options.KeepLatestPublication { - latestPub, latestSP, err := cache.recoverLatestPublication() - require.NoError(t, err) - require.Equal(t, pub, latestPub) - require.Equal(t, sp, latestSP) - } - }) - } -} - -func TestChannelCacheInsufficientState(t *testing.T) { - options := ChannelCacheOptions{ - UseQueue: true, - KeepLatestPublication: true, - } - doneCh := make(chan struct{}) - cache := setupChannelCache(t, options, &mockNode{ - handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { - require.Equal(t, uint64(math.MaxUint64), pub.Offset) - require.Equal(t, uint64(math.MaxUint64), sp.Offset) - require.False(t, bypassOffset) - close(doneCh) - return nil - }, - }) - cache.initState(&Publication{}, StreamPosition{}) - - // Simulate the behavior when the state is marked as insufficient - cache.processInsufficientState(StreamPosition{Offset: 2}, &Publication{}) - - select { - case <-doneCh: - case <-time.After(5 * time.Second): - require.Fail(t, "handlePublicationFunc was not called") - } -} - -func TestChannelCachePositionSync(t *testing.T) { - options := ChannelCacheOptions{ - 
PositionSyncInterval: 10 * time.Millisecond, - } - doneCh := make(chan struct{}) - var closeOnce sync.Once - cache := setupChannelCache(t, options, &mockNode{ - streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { - closeOnce.Do(func() { - close(doneCh) - }) - return nil, StreamPosition{}, nil - }, - }) - cache.initState(&Publication{}, StreamPosition{}) - - select { - case <-doneCh: - case <-time.After(5 * time.Second): - require.Fail(t, "historyFunc was not called") - } -} - -func TestChannelCachePositionSyncRetry(t *testing.T) { - options := ChannelCacheOptions{ - PositionSyncInterval: 10 * time.Millisecond, - } - doneCh := make(chan struct{}) - var closeOnce sync.Once - numCalls := 0 - cache := setupChannelCache(t, options, &mockNode{ - streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { - if numCalls == 0 { - numCalls++ - return nil, StreamPosition{}, errors.New("boom") - } - closeOnce.Do(func() { - close(doneCh) - }) - return nil, StreamPosition{}, nil - }, - }) - cache.initState(&Publication{}, StreamPosition{}) - - select { - case <-doneCh: - case <-time.After(5 * time.Second): - require.Fail(t, "historyFunc was not called") - } -} diff --git a/client_experimental.go b/client_experimental.go index 0760ecc6..bfcb355c 100644 --- a/client_experimental.go +++ b/client_experimental.go @@ -29,7 +29,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c) return err } - return c.writePublicationNoDelta(channel, pub, jsonPush, sp, false) + return c.writePublicationNoDelta(channel, pub, jsonPush, sp) } else { push := &protocol.Push{Channel: channel, Pub: pub} var err error @@ -38,7 +38,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(c) return err } 
- return c.writePublicationNoDelta(channel, pub, jsonReply, sp, false) + return c.writePublicationNoDelta(channel, pub, jsonReply, sp) } } else if protoType == protocol.TypeProtobuf { if c.transport.Unidirectional() { @@ -48,7 +48,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S if err != nil { return err } - return c.writePublicationNoDelta(channel, pub, protobufPush, sp, false) + return c.writePublicationNoDelta(channel, pub, protobufPush, sp) } else { push := &protocol.Push{Channel: channel, Pub: pub} var err error @@ -56,7 +56,7 @@ func (c *Client) WritePublication(channel string, publication *Publication, sp S if err != nil { return err } - return c.writePublicationNoDelta(channel, pub, protobufReply, sp, false) + return c.writePublicationNoDelta(channel, pub, protobufReply, sp) } } diff --git a/client_test.go b/client_test.go index 4395b501..c881cc7e 100644 --- a/client_test.go +++ b/client_test.go @@ -678,7 +678,7 @@ func testUnexpectedOffsetEpochProtocolV2(t *testing.T, offset uint64, epoch stri err = node.handlePublication("test", &Publication{ Offset: offset, - }, StreamPosition{offset, epoch}, false, nil, false) + }, StreamPosition{offset, epoch}, false, nil) require.NoError(t, err) select { diff --git a/hub_test.go b/hub_test.go index 71e66b38..81119c57 100644 --- a/hub_test.go +++ b/hub_test.go @@ -470,7 +470,7 @@ func TestHubBroadcastPublication(t *testing.T) { err := n.hub.BroadcastPublication( "non_existing_channel", &Publication{Data: []byte(`{"data": "broadcast_data"}`)}, - StreamPosition{}, false, + StreamPosition{}, ) require.NoError(t, err) @@ -478,7 +478,7 @@ func TestHubBroadcastPublication(t *testing.T) { err = n.hub.BroadcastPublication( "test_channel", &Publication{Data: []byte(`{"data": "broadcast_data"}`)}, - StreamPosition{}, false, + StreamPosition{}, ) require.NoError(t, err) LOOP: @@ -744,7 +744,7 @@ func BenchmarkHub_Contention(b *testing.B) { wg.Add(1) go func() { defer wg.Done() - _ = 
n.hub.BroadcastPublication(channels[(i+numChannels/2)%numChannels], pub, streamPosition, false) + _ = n.hub.BroadcastPublication(channels[(i+numChannels/2)%numChannels], pub, streamPosition) }() _, _ = n.hub.addSub(channels[i%numChannels], subInfo{client: clients[i%numClients], deltaType: ""}) wg.Wait() @@ -806,7 +806,7 @@ func BenchmarkHub_MassiveBroadcast(b *testing.B) { } } }() - _ = n.hub.BroadcastPublication(channels[i%numChannels], pub, streamPosition, false) + _ = n.hub.BroadcastPublication(channels[i%numChannels], pub, streamPosition) wg.Wait() } }) @@ -831,7 +831,7 @@ func TestHubBroadcastInappropriateProtocol_Publication(t *testing.T) { } err := n.hub.BroadcastPublication("test_channel", &Publication{ Data: []byte(`{111`), - }, StreamPosition{}, false) + }, StreamPosition{}) require.NoError(t, err) waitWithTimeout(t, done) } From e6d51c6e0231a33b6cb0650bb3da9e8e371eee9c Mon Sep 17 00:00:00 2001 From: FZambia Date: Fri, 26 Apr 2024 16:37:55 +0300 Subject: [PATCH 22/61] various adjustments --- _examples/compression_playground/main.go | 30 ++-- .../templates/index.html | 3 + client_test.go | 38 +++++ config.go | 3 +- events.go | 12 +- hub_test.go | 151 +++++++++++++++++- 6 files changed, 218 insertions(+), 19 deletions(-) diff --git a/_examples/compression_playground/main.go b/_examples/compression_playground/main.go index fad6de12..773a587d 100644 --- a/_examples/compression_playground/main.go +++ b/_examples/compression_playground/main.go @@ -196,10 +196,10 @@ func main() { useProtobufPayload = true } - go func() { - time.Sleep(500 * time.Millisecond) - client.Disconnect(centrifuge.DisconnectForceReconnect) - }() + //go func() { + // time.Sleep(500 * time.Millisecond) + // client.Disconnect(centrifuge.DisconnectForceReconnect) + //}() go func() { log.Printf("using protobuf payload: %v", useProtobufPayload) @@ -262,13 +262,21 @@ func main() { log.Fatal(err) } - go func() { - var num int32 - for { - num++ - simulateMatch(context.Background(), num, node, 
false) - } - }() + //go func() { + // var num int32 + // for { + // num++ + // simulateMatch(context.Background(), num, node, false) + // } + //}() + // + //go func() { + // var num int32 + // for { + // num++ + // simulateMatch(context.Background(), num, node, true) + // } + //}() // Now configure HTTP routes. diff --git a/_examples/compression_playground/templates/index.html b/_examples/compression_playground/templates/index.html index 46fae638..fcf9f3b6 100644 --- a/_examples/compression_playground/templates/index.html +++ b/_examples/compression_playground/templates/index.html @@ -47,6 +47,9 @@
  • JSON over Protobuf, with compression, with delta, with delay
  • +
  • + Protobuf over Protobuf, with compression, with delta, with delay +
  • diff --git a/client_test.go b/client_test.go index c881cc7e..dda50799 100644 --- a/client_test.go +++ b/client_test.go @@ -648,6 +648,44 @@ func TestClientSubscribeBrokerErrorOnRecoverHistory(t *testing.T) { } } +func TestClientSubscribeDeltaNotAllowed(t *testing.T) { + n := defaultTestNode() + n.config.AllowedDeltaTypes = []DeltaType{} + defer func() { _ = n.Shutdown(context.Background()) }() + + ctx, cancelFn := context.WithCancel(context.Background()) + transport := newTestTransport(cancelFn) + transport.sink = make(chan []byte, 100) + transport.setProtocolType(ProtocolTypeJSON) + transport.setProtocolVersion(ProtocolVersion2) + client := newTestConnectedClientWithTransport(t, ctx, n, transport, "42") + rwWrapper := testReplyWriterWrapper() + err := client.handleSubscribe(&protocol.SubscribeRequest{ + Channel: "test_channel", + Delta: string(DeltaTypeFossil), + }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw) + require.Equal(t, DisconnectBadRequest, err) +} + +func TestClientSubscribeUnknownDelta(t *testing.T) { + n := defaultTestNode() + n.config.AllowedDeltaTypes = []DeltaType{} + defer func() { _ = n.Shutdown(context.Background()) }() + + ctx, cancelFn := context.WithCancel(context.Background()) + transport := newTestTransport(cancelFn) + transport.sink = make(chan []byte, 100) + transport.setProtocolType(ProtocolTypeJSON) + transport.setProtocolVersion(ProtocolVersion2) + client := newTestConnectedClientWithTransport(t, ctx, n, transport, "42") + rwWrapper := testReplyWriterWrapper() + err := client.handleSubscribe(&protocol.SubscribeRequest{ + Channel: "test_channel", + Delta: "invalid", + }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw) + require.Equal(t, DisconnectBadRequest, err) +} + func testUnexpectedOffsetEpochProtocolV2(t *testing.T, offset uint64, epoch string) { t.Parallel() broker := NewTestBroker() diff --git a/config.go b/config.go index b205c13b..6a5a888c 100644 --- a/config.go +++ b/config.go @@ -110,7 +110,8 @@ type Config 
struct { ChannelNamespaceLabelForTransportMessagesReceived bool // AllowedDeltaTypes is a whitelist of DeltaType subscribers can use. At this point Centrifuge - // only supports DeltaTypeFossil. If not set clients won't be able to negotiate delta encoding. + // only supports DeltaTypeFossil. If zero value – clients won't be able to negotiate delta encoding. + // Delta encoding is an EXPERIMENTAL feature and may be changed/removed. AllowedDeltaTypes []DeltaType } diff --git a/events.go b/events.go index 9843fdbd..23673b9b 100644 --- a/events.go +++ b/events.go @@ -358,17 +358,23 @@ type HistoryHandler func(HistoryEvent, HistoryCallback) // internal state. Returning a copy is important to avoid data races. type StateSnapshotHandler func() (any, error) -// CacheEmptyEvent ... +// CacheEmptyEvent is issued when recovery mode is used but Centrifuge can't +// find Publication in history to recover from. This event allows application +// to decide what to do in this case – it's possible to populate the cache by +// sending actual data to a channel. type CacheEmptyEvent struct { Channel string } -// CacheEmptyReply ... +// CacheEmptyReply contains fields determining the reaction on cache empty event. type CacheEmptyReply struct { + // Populated when set to true tells Centrifuge that cache was populated and + // in that case Centrifuge will try to recover missed Publication from history + // one more time. Populated bool } -// CacheEmptyHandler ... +// CacheEmptyHandler allows setting cache empty handler function. type CacheEmptyHandler func(CacheEmptyEvent) CacheEmptyReply // SurveyEvent with Op and Data of survey. 
diff --git a/hub_test.go b/hub_test.go index 81119c57..3453d6a2 100644 --- a/hub_test.go +++ b/hub_test.go @@ -3,6 +3,7 @@ package centrifuge import ( "context" "fmt" + "github.com/centrifugal/protocol" "io" "strconv" "strings" @@ -444,10 +445,10 @@ func TestHubBroadcastPublication(t *testing.T) { protocolVersion ProtocolVersion uni bool }{ - {name: "JSON-V2", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2}, - {name: "Protobuf-V2", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2}, - {name: "JSON-V2-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true}, - {name: "Protobuf-V2-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true}, + {name: "JSON", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2}, + {name: "Protobuf", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2}, + {name: "JSON-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true}, + {name: "Protobuf-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true}, } for _, tc := range tcs { @@ -496,6 +497,148 @@ func TestHubBroadcastPublication(t *testing.T) { } } +func deltaTestNode() *Node { + n := defaultNodeNoHandlers() + n.OnConnect(func(client *Client) { + client.OnSubscribe(func(e SubscribeEvent, cb SubscribeCallback) { + cb(SubscribeReply{ + Options: SubscribeOptions{ + EnableRecovery: true, + RecoveryMode: RecoveryModeCache, + }, + }, nil) + }) + client.OnPublish(func(e PublishEvent, cb PublishCallback) { + cb(PublishReply{}, nil) + }) + }) + return n +} + +func newTestSubscribedClientWithTransportDelta(t *testing.T, ctx context.Context, n *Node, transport Transport, userID, chanID string, deltaType DeltaType) *Client { + client := newTestConnectedClientWithTransport(t, ctx, n, transport, userID) + subscribeClientDelta(t, client, chanID, deltaType) + require.True(t, n.hub.NumSubscribers(chanID) > 0) + 
require.Contains(t, client.channels, chanID) + return client +} + +func subscribeClientDelta(t testing.TB, client *Client, ch string, deltaType DeltaType) *protocol.SubscribeResult { + rwWrapper := testReplyWriterWrapper() + err := client.handleSubscribe(&protocol.SubscribeRequest{ + Channel: ch, + Delta: string(deltaType), + }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw) + require.NoError(t, err) + require.Nil(t, rwWrapper.replies[0].Error) + return rwWrapper.replies[0].Subscribe +} + +func TestHubBroadcastPublicationDelta(t *testing.T) { + tcs := []struct { + name string + protocolType ProtocolType + protocolVersion ProtocolVersion + uni bool + }{ + {name: "JSON", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2}, + {name: "Protobuf", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2}, + {name: "JSON-uni", protocolType: ProtocolTypeJSON, protocolVersion: ProtocolVersion2, uni: true}, + {name: "Protobuf-uni", protocolType: ProtocolTypeProtobuf, protocolVersion: ProtocolVersion2, uni: true}, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + n := deltaTestNode() + n.config.GetChannelNamespaceLabel = func(channel string) string { + return channel + } + n.config.AllowedDeltaTypes = []DeltaType{DeltaTypeFossil} + defer func() { _ = n.Shutdown(context.Background()) }() + + ctx, cancelFn := context.WithCancel(context.Background()) + transport := newTestTransport(cancelFn) + transport.sink = make(chan []byte, 100) + transport.setProtocolType(tc.protocolType) + transport.setProtocolVersion(tc.protocolVersion) + transport.setUnidirectional(tc.uni) + newTestSubscribedClientWithTransportDelta( + t, ctx, n, transport, "42", "test_channel", DeltaTypeFossil) + + res, err := n.History("test_channel") + require.NoError(t, err) + + // Broadcast to non-existing channel. 
+ err = n.hub.broadcastPublicationDelta( + "non_existing_channel", + &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1}, + nil, + StreamPosition{Offset: 1, Epoch: res.StreamPosition.Epoch}, + ) + require.NoError(t, err) + + // Broadcast to existing channel. + err = n.hub.broadcastPublicationDelta( + "test_channel", + &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1}, + nil, + StreamPosition{Offset: 1, Epoch: res.StreamPosition.Epoch}, + ) + require.NoError(t, err) + + totalLength := 0 + + LOOP: + for { + select { + case data := <-transport.sink: + if tc.protocolType == ProtocolTypeProtobuf { + if strings.Contains(string(data), "broadcast_data") { + totalLength += len(data) + break LOOP + } + } else { + if strings.Contains(string(data), "pub") && strings.Contains(string(data), "b64data") { + totalLength += len(data) + break LOOP + } + } + case <-time.After(2 * time.Second): + t.Fatal("no data in sink") + } + } + + // Broadcast same data to existing channel. 
+ err = n.hub.broadcastPublicationDelta( + "test_channel", + &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 2}, + &Publication{Data: []byte(`{"data": "broadcast_data"}`), Offset: 1}, + StreamPosition{Offset: 2, Epoch: res.StreamPosition.Epoch}, + ) + require.NoError(t, err) + + LOOP2: + for { + select { + case data := <-transport.sink: + if tc.protocolType == ProtocolTypeProtobuf { + if strings.Contains(string(data), "broadcast_data") { + require.Fail(t, "should not receive same data twice - delta expected") + } + } else { + if strings.Contains(string(data), "pub") && strings.Contains(string(data), "b64data") && !strings.Contains(string(data), "delta") { + require.Fail(t, "should not receive same data twice - delta expected") + } + } + break LOOP2 + case <-time.After(2 * time.Second): + t.Fatal("no data in sink 2") + } + } + }) + } +} + func TestHubBroadcastJoin(t *testing.T) { tcs := []struct { name string From c22500c36623ddf6d401df2cdfe691ef894475a4 Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 28 Apr 2024 17:06:34 +0300 Subject: [PATCH 23/61] try using js data --- client.go | 15 +++++++++------ hub.go | 14 ++++++++------ writer.go | 2 +- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/client.go b/client.go index 1eb4602a..7d2f7402 100644 --- a/client.go +++ b/client.go @@ -3022,13 +3022,16 @@ func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publicat prevPub := recoveredPubs[0] if c.transport.Protocol() == ProtocolTypeJSON { // For JSON case we need to use b64 for data. 
+ + js, _ := json.Marshal(string(prevPub.Data)) + pub := &protocol.Publication{ - Offset: prevPub.Offset, - Info: prevPub.Info, - Tags: prevPub.Tags, - Data: nil, - B64Data: base64.StdEncoding.EncodeToString(prevPub.Data), - Delta: false, + Offset: prevPub.Offset, + Info: prevPub.Info, + Tags: prevPub.Tags, + Data: js, + //B64Data: string(js), //base64.StdEncoding.EncodeToString(prevPub.Data), + Delta: false, } recoveredPubs[0] = pub } diff --git a/hub.go b/hub.go index 9196278a..da03946a 100644 --- a/hub.go +++ b/hub.go @@ -3,6 +3,7 @@ package centrifuge import ( "context" "encoding/base64" + "github.com/segmentio/encoding/json" "io" "sync" @@ -597,15 +598,16 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p deltaPub := fullPub if prevPub != nil && key.DeltaType == DeltaTypeFossil { patch := fdelta.Create(prevPub.Data, fullPub.Data) + js, _ := json.Marshal(string(patch)) if key.ProtocolType == protocol.TypeJSON { - b64patch := base64.StdEncoding.EncodeToString(patch) + //b64patch := base64.StdEncoding.EncodeToString(patch) deltaPub = &protocol.Publication{ Offset: fullPub.Offset, - //Data: nil, - Info: fullPub.Info, - Tags: fullPub.Tags, - Delta: true, - B64Data: b64patch, + Data: js, + Info: fullPub.Info, + Tags: fullPub.Tags, + Delta: true, + //B64Data: b64patch, } } else { deltaPub = &protocol.Publication{ diff --git a/writer.go b/writer.go index 15ac9841..a9edef81 100644 --- a/writer.go +++ b/writer.go @@ -36,7 +36,7 @@ func newWriter(config writerConfig, queueInitialCap int) *writer { } const ( - defaultMaxMessagesInFrame = 16 + defaultMaxMessagesInFrame = 1 ) func (w *writer) waitSendMessage(maxMessagesInFrame int, writeDelay time.Duration) bool { From 2f5234dc1c8dd9b88e821713f8879e305d238879 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 29 Apr 2024 15:52:46 +0300 Subject: [PATCH 24/61] js data --- client.go | 25 +++++++++++-------------- hub.go | 50 ++++++++++++++++++++++++++++++-------------------- hub_test.go | 41 
++++++++++++++++++++++------------------- writer.go | 2 +- 4 files changed, 64 insertions(+), 54 deletions(-) diff --git a/client.go b/client.go index 7d2f7402..7773d043 100644 --- a/client.go +++ b/client.go @@ -2,9 +2,9 @@ package centrifuge import ( "context" - "encoding/base64" "errors" "fmt" + "github.com/centrifugal/centrifuge/internal/convert" "io" "slices" "sync" @@ -3021,17 +3021,14 @@ func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publicat } prevPub := recoveredPubs[0] if c.transport.Protocol() == ProtocolTypeJSON { - // For JSON case we need to use b64 for data. - - js, _ := json.Marshal(string(prevPub.Data)) - + // For JSON case we need to use JSON string (js) for data. + jsData, _ := json.Marshal(convert.BytesToString(prevPub.Data)) pub := &protocol.Publication{ Offset: prevPub.Offset, Info: prevPub.Info, Tags: prevPub.Tags, - Data: js, - //B64Data: string(js), //base64.StdEncoding.EncodeToString(prevPub.Data), - Delta: false, + Data: jsData, + Delta: false, } recoveredPubs[0] = pub } @@ -3042,14 +3039,14 @@ func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publicat patch := fdelta.Create(prevPub.Data, pub.Data) var deltaPub *protocol.Publication if c.transport.Protocol() == ProtocolTypeJSON { - b64patch := base64.StdEncoding.EncodeToString(patch) + // For JSON case we need to use JSON string (js) for patch. 
+ jsPatch, _ := json.Marshal(convert.BytesToString(patch)) deltaPub = &protocol.Publication{ Offset: pub.Offset, - //Data: nil, - Info: pub.Info, - Tags: pub.Tags, - Delta: true, - B64Data: b64patch, + Data: jsPatch, + Info: pub.Info, + Tags: pub.Tags, + Delta: true, } } else { deltaPub = &protocol.Publication{ diff --git a/hub.go b/hub.go index da03946a..14084475 100644 --- a/hub.go +++ b/hub.go @@ -2,12 +2,13 @@ package centrifuge import ( "context" - "encoding/base64" - "github.com/segmentio/encoding/json" "io" "sync" + "github.com/centrifugal/centrifuge/internal/convert" + "github.com/centrifugal/protocol" + "github.com/segmentio/encoding/json" fdelta "github.com/shadowspore/fossil-delta" ) @@ -598,16 +599,17 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p deltaPub := fullPub if prevPub != nil && key.DeltaType == DeltaTypeFossil { patch := fdelta.Create(prevPub.Data, fullPub.Data) - js, _ := json.Marshal(string(patch)) if key.ProtocolType == protocol.TypeJSON { - //b64patch := base64.StdEncoding.EncodeToString(patch) + jsData, err := json.Marshal(convert.BytesToString(patch)) + if err != nil { + jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} + } deltaPub = &protocol.Publication{ Offset: fullPub.Offset, - Data: js, + Data: jsData, Info: fullPub.Info, Tags: fullPub.Tags, Delta: true, - //B64Data: b64patch, } } else { deltaPub = &protocol.Publication{ @@ -619,14 +621,16 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p } } } else if prevPub == nil && key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { - // In JSON and Fossil case we need to send full state in base64 format. - b64data := base64.StdEncoding.EncodeToString(fullPub.Data) + // In JSON and Fossil case we need to send full state in JSON string format. 
+ jsData, err := json.Marshal(convert.BytesToString(fullPub.Data)) + if err != nil { + jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} + } deltaPub = &protocol.Publication{ Offset: fullPub.Offset, - //Data: nil, - Info: fullPub.Info, - Tags: fullPub.Tags, - B64Data: b64data, + Data: jsData, + Info: fullPub.Info, + Tags: fullPub.Tags, } } @@ -637,12 +641,15 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p if sub.client.transport.Unidirectional() { pubToUse := fullPub if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { + jsData, err := json.Marshal(convert.BytesToString(fullPub.Data)) + if err != nil { + jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} + } pubToUse = &protocol.Publication{ Offset: fullPub.Offset, - //Data: nil, - Info: fullPub.Info, - Tags: fullPub.Tags, - B64Data: base64.StdEncoding.EncodeToString(fullPub.Data), + Data: jsData, + Info: fullPub.Info, + Tags: fullPub.Tags, } } push := &protocol.Push{Channel: channel, Pub: pubToUse} @@ -654,12 +661,15 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p } else { pubToUse := fullPub if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { + jsData, err := json.Marshal(convert.BytesToString(fullPub.Data)) + if err != nil { + jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} + } pubToUse = &protocol.Publication{ Offset: fullPub.Offset, - //Data: nil, - Info: fullPub.Info, - Tags: fullPub.Tags, - B64Data: base64.StdEncoding.EncodeToString(fullPub.Data), + Data: jsData, + Info: fullPub.Info, + Tags: fullPub.Tags, } } push := &protocol.Push{Channel: channel, Pub: pubToUse} diff --git a/hub_test.go b/hub_test.go index 3453d6a2..9e372dbd 100644 --- a/hub_test.go +++ b/hub_test.go @@ -3,7 +3,6 @@ package centrifuge import ( "context" "fmt" - 
"github.com/centrifugal/protocol" "io" "strconv" "strings" @@ -11,6 +10,10 @@ import ( "testing" "time" + "github.com/centrifugal/centrifuge/internal/convert" + + "github.com/centrifugal/protocol" + "github.com/segmentio/encoding/json" "github.com/stretchr/testify/require" ) @@ -592,16 +595,9 @@ func TestHubBroadcastPublicationDelta(t *testing.T) { for { select { case data := <-transport.sink: - if tc.protocolType == ProtocolTypeProtobuf { - if strings.Contains(string(data), "broadcast_data") { - totalLength += len(data) - break LOOP - } - } else { - if strings.Contains(string(data), "pub") && strings.Contains(string(data), "b64data") { - totalLength += len(data) - break LOOP - } + if strings.Contains(string(data), "broadcast_data") { + totalLength += len(data) + break LOOP } case <-time.After(2 * time.Second): t.Fatal("no data in sink") @@ -621,14 +617,8 @@ func TestHubBroadcastPublicationDelta(t *testing.T) { for { select { case data := <-transport.sink: - if tc.protocolType == ProtocolTypeProtobuf { - if strings.Contains(string(data), "broadcast_data") { - require.Fail(t, "should not receive same data twice - delta expected") - } - } else { - if strings.Contains(string(data), "pub") && strings.Contains(string(data), "b64data") && !strings.Contains(string(data), "delta") { - require.Fail(t, "should not receive same data twice - delta expected") - } + if strings.Contains(string(data), "broadcast_data") { + require.Fail(t, "should not receive same data twice - delta expected") } break LOOP2 case <-time.After(2 * time.Second): @@ -1042,3 +1032,16 @@ func TestHubBroadcastInappropriateProtocol_Leave(t *testing.T) { testFunc(client) }) } + +var res []byte + +func BenchmarkEncode(b *testing.B) { + jsonData := []byte(`{"input": "test"}`) + for i := 0; i < b.N; i++ { + var err error + res, err = json.Marshal(convert.BytesToString(jsonData)) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/writer.go b/writer.go index a9edef81..15ac9841 100644 --- a/writer.go +++ 
b/writer.go @@ -36,7 +36,7 @@ func newWriter(config writerConfig, queueInitialCap int) *writer { } const ( - defaultMaxMessagesInFrame = 1 + defaultMaxMessagesInFrame = 16 ) func (w *writer) waitSendMessage(maxMessagesInFrame int, writeDelay time.Duration) bool { From c58213e01519fe83e7da0303057127159b96c26a Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 29 Apr 2024 19:02:49 +0300 Subject: [PATCH 25/61] effective json string, tests --- _examples/compression_playground/readme.md | 29 ++--- client.go | 6 +- hub.go | 24 +--- hub_test.go | 121 ++++++++++++++++++++- 4 files changed, 136 insertions(+), 44 deletions(-) diff --git a/_examples/compression_playground/readme.md b/_examples/compression_playground/readme.md index 4c6a83ed..49c9bb9d 100644 --- a/_examples/compression_playground/readme.md +++ b/_examples/compression_playground/readme.md @@ -8,20 +8,21 @@ caught with WireShark filter: tcp.srcport == 8000 && websocket ``` -| Protocol | Compression | Delta | Bytes sent | Percentage | -|--------------------------|-------------|-----------|------------|------------| -| JSON over JSON | No | No | 29510 | 100.0 | -| JSON over JSON | Yes | No | 11135 | 37.73 | -| JSON over JSON | No | Yes | 6435 | 21.81 | -| JSON over JSON | Yes | Yes | 4963 | 16.82 | -| JSON over Protobuf | No | No | 28589 | 96.88 | -| JSON over Protobuf | Yes | No | 11133 | 37.73 | -| JSON over Protobuf | No | Yes | 4276 | 14.49 | -| JSON over Protobuf | Yes | Yes | 3454 | 11.70 | -| Protobuf over Protobuf | No | No | ? | ? | -| Protobuf over Protobuf | Yes | No | ? | ? | -| Protobuf over Protobuf | No | Yes | ? | ? | -| Protobuf over Protobuf | Yes | Yes | ? | ? 
| +| Protocol | Compression | Delta | Bytes sent | Percentage | +|-----------------------------|-------------|-----------|------------|------------| +| JSON over JSON | No | No | 29510 | 100.0 | +| JSON over JSON | Yes | No | 11135 | 37.73 | +| JSON over JSON | No | Yes | 6435 | 21.81 | +| JSON over JSON | Yes | Yes | 4963 | 16.82 | +| JSON over Protobuf | No | No | 28589 | 96.88 | +| JSON over Protobuf | Yes | No | 11133 | 37.73 | +| JSON over Protobuf | No | Yes | 4276 | 14.49 | +| JSON over Protobuf | Yes | Yes | 3454 | 11.70 | +| Protobuf over Protobuf | No | No | ? | ? | +| Protobuf over Protobuf | Yes | No | ? | ? | +| Protobuf over Protobuf | No | Yes | ? | ? | +| Protobuf over Protobuf | Yes | Yes | ? | ? | +| Protobuf over Protobuf | Yes | Yes | ? | ? | Note: since we send JSON over Protobuf, the JSON size is the same as the JSON over JSON case. In this case Centrifugal protocol gives lower overhead, but the main part comes from the JSON payload size. diff --git a/client.go b/client.go index 7773d043..a78e0140 100644 --- a/client.go +++ b/client.go @@ -3022,12 +3022,11 @@ func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publicat prevPub := recoveredPubs[0] if c.transport.Protocol() == ProtocolTypeJSON { // For JSON case we need to use JSON string (js) for data. - jsData, _ := json.Marshal(convert.BytesToString(prevPub.Data)) pub := &protocol.Publication{ Offset: prevPub.Offset, Info: prevPub.Info, Tags: prevPub.Tags, - Data: jsData, + Data: json.Escape(convert.BytesToString(prevPub.Data)), Delta: false, } recoveredPubs[0] = pub @@ -3040,10 +3039,9 @@ func (c *Client) makeRecoveredPubsDeltaFossil(recoveredPubs []*protocol.Publicat var deltaPub *protocol.Publication if c.transport.Protocol() == ProtocolTypeJSON { // For JSON case we need to use JSON string (js) for patch. 
- jsPatch, _ := json.Marshal(convert.BytesToString(patch)) deltaPub = &protocol.Publication{ Offset: pub.Offset, - Data: jsPatch, + Data: json.Escape(convert.BytesToString(patch)), Info: pub.Info, Tags: pub.Tags, Delta: true, diff --git a/hub.go b/hub.go index 14084475..7d77fe11 100644 --- a/hub.go +++ b/hub.go @@ -600,13 +600,9 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p if prevPub != nil && key.DeltaType == DeltaTypeFossil { patch := fdelta.Create(prevPub.Data, fullPub.Data) if key.ProtocolType == protocol.TypeJSON { - jsData, err := json.Marshal(convert.BytesToString(patch)) - if err != nil { - jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} - } deltaPub = &protocol.Publication{ Offset: fullPub.Offset, - Data: jsData, + Data: json.Escape(convert.BytesToString(patch)), Info: fullPub.Info, Tags: fullPub.Tags, Delta: true, @@ -622,13 +618,9 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p } } else if prevPub == nil && key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { // In JSON and Fossil case we need to send full state in JSON string format. 
- jsData, err := json.Marshal(convert.BytesToString(fullPub.Data)) - if err != nil { - jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} - } deltaPub = &protocol.Publication{ Offset: fullPub.Offset, - Data: jsData, + Data: json.Escape(convert.BytesToString(fullPub.Data)), Info: fullPub.Info, Tags: fullPub.Tags, } @@ -641,13 +633,9 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p if sub.client.transport.Unidirectional() { pubToUse := fullPub if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { - jsData, err := json.Marshal(convert.BytesToString(fullPub.Data)) - if err != nil { - jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} - } pubToUse = &protocol.Publication{ Offset: fullPub.Offset, - Data: jsData, + Data: json.Escape(convert.BytesToString(fullPub.Data)), Info: fullPub.Info, Tags: fullPub.Tags, } @@ -661,13 +649,9 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p } else { pubToUse := fullPub if key.ProtocolType == protocol.TypeJSON && key.DeltaType == DeltaTypeFossil { - jsData, err := json.Marshal(convert.BytesToString(fullPub.Data)) - if err != nil { - jsonEncodeErr = &encodeError{client: sub.client.ID(), user: sub.client.UserID(), error: err} - } pubToUse = &protocol.Publication{ Offset: fullPub.Offset, - Data: jsData, + Data: json.Escape(convert.BytesToString(fullPub.Data)), Info: fullPub.Info, Tags: fullPub.Tags, } diff --git a/hub_test.go b/hub_test.go index 9e372dbd..cd30debf 100644 --- a/hub_test.go +++ b/hub_test.go @@ -14,6 +14,7 @@ import ( "github.com/centrifugal/protocol" "github.com/segmentio/encoding/json" + fdelta "github.com/shadowspore/fossil-delta" "github.com/stretchr/testify/require" ) @@ -1033,15 +1034,123 @@ func TestHubBroadcastInappropriateProtocol_Leave(t *testing.T) { }) } -var res []byte +var testJsonData = []byte(`{ + "_id":"662fb7df5110d6e8e9942fb2", + 
"index":0, + "guid":"a100afc6-fc35-47fd-8e3e-e8e9a81629ec", + "isActive":true, + "balance":"$2,784.25", + "picture":"http://placehold.it/32x32", + "age":21, + "eyeColor":"green", + "name":"Lois Norris", + "gender":"female", + "company":"ORGANICA", + "email":"loisnorris@organica.com", + "phone":"+1 (939) 451-2349", + "address":"774 Ide Court, Sabillasville, Virginia, 4034", + "about":"Cupidatat reprehenderit laboris aute pariatur nulla exercitation. Commodo aliqua cupidatat consectetur aliquip. Id irure nisi qui ullamco culpa reprehenderit nisi sunt consequat ipsum. Velit officia sint id voluptate anim. Sunt duis duis consequat mollit incididunt laborum enim amet ad aliqua esse nulla. Aliqua nulla adipisicing ad aliquip ut. Nostrud mollit ex aute magna culpa ea exercitation qui ex.\r\n", + "registered":"2023-02-28T11:09:34 -02:00", + "latitude":24.054483, + "longitude":38.953522, + "tags":[ + "consequat", + "adipisicing", + "eiusmod", + "ipsum", + "enim", + "et", + "voluptate" + ], + "friends":[ + { + "id":0, + "name":"Kaufman Randall" + }, + { + "id":1, + "name":"Byrd Cooley" + }, + { + "id":2, + "name":"Obrien William" + } + ], + "greeting":"Hello, Lois Norris! You have 9 unread messages.", + "favoriteFruit":"banana" +}`) + +// Has some changes (in tags field, in friends field). +var testNewJsonData = []byte(`{ + "_id":"662fb7df5110d6e8e9942fb2", + "index":0, + "guid":"a100afc6-fc35-47fd-8e3e-e8e9a81629ec", + "isActive":true, + "balance":"$2,784.25", + "picture":"http://placehold.it/32x32", + "age":21, + "eyeColor":"green", + "name":"Lois Norris", + "gender":"female", + "company":"ORGANICA", + "email":"loisnorris@organica.com", + "phone":"+1 (939) 451-2349", + "address":"774 Ide Court, Sabillasville, Virginia, 4034", + "about":"Cupidatat reprehenderit laboris aute pariatur nulla exercitation. Commodo aliqua cupidatat consectetur aliquip. Id irure nisi qui ullamco culpa reprehenderit nisi sunt consequat ipsum. Velit officia sint id voluptate anim. 
Sunt duis duis consequat mollit incididunt laborum enim amet ad aliqua esse nulla. Aliqua nulla adipisicing ad aliquip ut. Nostrud mollit ex aute magna culpa ea exercitation qui ex.\r\n", + "registered":"2023-02-28T11:09:34 -02:00", + "latitude":24.054483, + "longitude":38.953522, + "tags":[ + "consequat", + "adipisicing", + "eiusmod" + ], + "friends":[ + { + "id":0, + "name":"Kaufman Randall" + }, + { + "id":1, + "name":"Byrd Cooley" + } + ], + "greeting":"Hello, Lois Norris! You have 9 unread messages.", + "favoriteFruit":"banana" +}`) + +func TestJsonStringEncode(t *testing.T) { + testBenchmarkDeltaFossilPatch = fdelta.Create(testJsonData, testNewJsonData) + if len(testBenchmarkDeltaFossilPatch) == 0 { + t.Fatal("empty fossil patch") + } + testDeltaJsonData, err := json.Marshal(convert.BytesToString(testBenchmarkDeltaFossilPatch)) + require.NoError(t, err) + require.NotNil(t, testDeltaJsonData) + + alternativeDeltaJsonData := json.Escape(convert.BytesToString(testBenchmarkDeltaFossilPatch)) + require.Equal(t, testDeltaJsonData, alternativeDeltaJsonData) +} + +var testBenchmarkEncodeData []byte -func BenchmarkEncode(b *testing.B) { +func BenchmarkEncodeJSONString(b *testing.B) { jsonData := []byte(`{"input": "test"}`) for i := 0; i < b.N; i++ { - var err error - res, err = json.Marshal(convert.BytesToString(jsonData)) - if err != nil { - b.Fatal(err) + testBenchmarkEncodeData = json.Escape(convert.BytesToString(jsonData)) + if len(testBenchmarkEncodeData) == 0 { + b.Fatal("empty data") + } + } +} + +var testBenchmarkDeltaFossilPatch []byte + +func BenchmarkDeltaFossil(b *testing.B) { + for i := 0; i < b.N; i++ { + testBenchmarkDeltaFossilPatch = fdelta.Create(testJsonData, testNewJsonData) + if len(testBenchmarkDeltaFossilPatch) == 0 { + b.Fatal("empty fossil patch") } } } From 2bc1a24f6b9fed12c84164def08cfc0da32eed12 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 29 Apr 2024 19:03:57 +0300 Subject: [PATCH 26/61] fix lint --- hub_test.go | 5 +---- 1 file 
changed, 1 insertion(+), 4 deletions(-) diff --git a/hub_test.go b/hub_test.go index cd30debf..fafce6a6 100644 --- a/hub_test.go +++ b/hub_test.go @@ -589,15 +589,12 @@ func TestHubBroadcastPublicationDelta(t *testing.T) { StreamPosition{Offset: 1, Epoch: res.StreamPosition.Epoch}, ) require.NoError(t, err) - - totalLength := 0 - + LOOP: for { select { case data := <-transport.sink: if strings.Contains(string(data), "broadcast_data") { - totalLength += len(data) break LOOP } case <-time.After(2 * time.Second): From ddd8e2efac42f796ea8737b8eab697a74fa02479 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 29 Apr 2024 19:09:00 +0300 Subject: [PATCH 27/61] pin to latest protocol --- _examples/go.mod | 2 +- _examples/go.sum | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- hub_test.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/_examples/go.mod b/_examples/go.mod index 8cf3fb45..39672dd0 100644 --- a/_examples/go.mod +++ b/_examples/go.mod @@ -7,7 +7,7 @@ replace github.com/centrifugal/centrifuge => ../ require ( github.com/FZambia/tarantool v0.2.2 github.com/centrifugal/centrifuge v0.8.2 - github.com/centrifugal/protocol v0.12.2-0.20240422190129-0bd81b469a35 + github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe github.com/cristalhq/jwt/v5 v5.4.0 github.com/dchest/uniuri v1.2.0 github.com/gin-contrib/sessions v0.0.3 diff --git a/_examples/go.sum b/_examples/go.sum index f81fb97e..85737a09 100644 --- a/_examples/go.sum +++ b/_examples/go.sum @@ -14,8 +14,8 @@ github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414 github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/centrifugal/protocol v0.12.2-0.20240422190129-0bd81b469a35 h1:ClXkBRlf4Cn+82L1B1y483gRwznq6Uifr+zBBeEu7RI= 
-github.com/centrifugal/protocol v0.12.2-0.20240422190129-0bd81b469a35/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= +github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe h1:uXsl6MWJZMlk42wfVDOK0z596x9JqmkvAqlWzQ830qU= +github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= diff --git a/go.mod b/go.mod index 0f3b2a27..e273a08d 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/FZambia/eagle v0.1.0 - github.com/centrifugal/protocol v0.12.2-0.20240422190129-0bd81b469a35 + github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe github.com/google/uuid v1.6.0 github.com/prometheus/client_golang v1.19.0 github.com/redis/rueidis v1.0.33 diff --git a/go.sum b/go.sum index 23a22381..f5643007 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/FZambia/eagle v0.1.0 h1:9gyX6x+xjoIfglgyPTcYm7dvY7FJ93us1QY5De4CyXA= github.com/FZambia/eagle v0.1.0/go.mod h1:YjGSPVkQTNcVLfzEUQJNgW9ScPR0K4u/Ky0yeFa4oDA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/centrifugal/protocol v0.12.2-0.20240422190129-0bd81b469a35 h1:ClXkBRlf4Cn+82L1B1y483gRwznq6Uifr+zBBeEu7RI= -github.com/centrifugal/protocol v0.12.2-0.20240422190129-0bd81b469a35/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= +github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe h1:uXsl6MWJZMlk42wfVDOK0z596x9JqmkvAqlWzQ830qU= +github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= 
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/hub_test.go b/hub_test.go index fafce6a6..8286f336 100644 --- a/hub_test.go +++ b/hub_test.go @@ -589,7 +589,7 @@ func TestHubBroadcastPublicationDelta(t *testing.T) { StreamPosition{Offset: 1, Epoch: res.StreamPosition.Epoch}, ) require.NoError(t, err) - + LOOP: for { select { From b9b5b8d8a86b8b7284ea6298747a85ebfd12453a Mon Sep 17 00:00:00 2001 From: FZambia Date: Wed, 1 May 2024 08:21:13 +0300 Subject: [PATCH 28/61] move cache empty handler to node --- client.go | 13 ++----------- node.go | 9 +++++++++ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client.go b/client.go index a78e0140..97f32ef1 100644 --- a/client.go +++ b/client.go @@ -51,7 +51,6 @@ type clientEventHub struct { presenceStatsHandler PresenceStatsHandler historyHandler HistoryHandler stateSnapshotHandler StateSnapshotHandler - cacheEmptyHandler CacheEmptyHandler } // OnAlive allows setting AliveHandler. @@ -108,14 +107,6 @@ func (c *Client) OnPublish(h PublishHandler) { c.eventHub.publishHandler = h } -// OnCacheEmpty allows setting CacheEmptyHandler. -// CacheEmptyHandler called when client subscribes on a channel with RecoveryModeCache but there is no -// cached value in channel. In response to this handler it's possible to tell Centrifuge what to do with -// subscribe request – keep it, or return error. -func (c *Client) OnCacheEmpty(h CacheEmptyHandler) { - c.eventHub.cacheEmptyHandler = h -} - // OnPresence allows setting PresenceHandler. // PresenceHandler called when Presence request from client received. // At this moment you can only return a custom error or disconnect client. 
@@ -2838,8 +2829,8 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep var recovered bool recoveredPubs, recovered = isCacheRecovered(latestPub, currentSP, cmdOffset, cmdEpoch) res.Recovered = recovered - if latestPub == nil && c.eventHub.cacheEmptyHandler != nil { - cacheReply := c.eventHub.cacheEmptyHandler(CacheEmptyEvent{Channel: channel}) + if latestPub == nil && c.node.clientEvents.cacheEmptyHandler != nil { + cacheReply := c.node.clientEvents.cacheEmptyHandler(CacheEmptyEvent{Channel: channel}) if cacheReply.Populated && !recovered { // One more chance to recover in case we know cache was populated. latestPub, currentSP, err = c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) diff --git a/node.go b/node.go index f441c743..fb35081e 100644 --- a/node.go +++ b/node.go @@ -1510,6 +1510,7 @@ type eventHub struct { transportWriteHandler TransportWriteHandler commandReadHandler CommandReadHandler commandProcessedHandler CommandProcessedHandler + cacheEmptyHandler CacheEmptyHandler } // OnConnecting allows setting ConnectingHandler. @@ -1542,6 +1543,14 @@ func (n *Node) OnCommandProcessed(handler CommandProcessedHandler) { n.clientEvents.commandProcessedHandler = handler } +// OnCacheEmpty allows setting CacheEmptyHandler. +// CacheEmptyHandler called when client subscribes on a channel with RecoveryModeCache but there is no +// cached value in channel. In response to this handler it's possible to tell Centrifuge what to do with +// subscribe request – keep it, or return error. 
+func (n *Node) OnCacheEmpty(h CacheEmptyHandler) { + n.clientEvents.cacheEmptyHandler = h +} + type brokerEventHandler struct { node *Node } From d4699c79d655c562907ddea8bba0815fbaf7fd35 Mon Sep 17 00:00:00 2001 From: FZambia Date: Wed, 1 May 2024 08:44:46 +0300 Subject: [PATCH 29/61] cache empty handler returns error --- client.go | 6 +++++- events.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/client.go b/client.go index 97f32ef1..d41065c6 100644 --- a/client.go +++ b/client.go @@ -2830,7 +2830,11 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep recoveredPubs, recovered = isCacheRecovered(latestPub, currentSP, cmdOffset, cmdEpoch) res.Recovered = recovered if latestPub == nil && c.node.clientEvents.cacheEmptyHandler != nil { - cacheReply := c.node.clientEvents.cacheEmptyHandler(CacheEmptyEvent{Channel: channel}) + cacheReply, err := c.node.clientEvents.cacheEmptyHandler(CacheEmptyEvent{Channel: channel}) + if err != nil { + c.node.logger.log(newLogEntry(LogLevelError, "error on cache empty", map[string]any{"channel": channel, "user": c.user, "client": c.uid, "error": err.Error()})) + return handleErr(err) + } if cacheReply.Populated && !recovered { // One more chance to recover in case we know cache was populated. latestPub, currentSP, err = c.node.recoverCache(channel, reply.Options.HistoryMetaTTL) diff --git a/events.go b/events.go index 23673b9b..0fe10e98 100644 --- a/events.go +++ b/events.go @@ -375,7 +375,7 @@ type CacheEmptyReply struct { } // CacheEmptyHandler allows setting cache empty handler function. -type CacheEmptyHandler func(CacheEmptyEvent) CacheEmptyReply +type CacheEmptyHandler func(CacheEmptyEvent) (CacheEmptyReply, error) // SurveyEvent with Op and Data of survey. 
type SurveyEvent struct { From 90dae904c2ea6d5cf7466ee019c8c437209a646f Mon Sep 17 00:00:00 2001 From: FZambia Date: Sat, 4 May 2024 12:39:22 +0300 Subject: [PATCH 30/61] define revisited channel cache --- channel_cache.go | 477 ++++++++++++++++++++++++++++++++++++++++++ channel_cache_test.go | 185 ++++++++++++++++ 2 files changed, 662 insertions(+) create mode 100644 channel_cache.go create mode 100644 channel_cache_test.go diff --git a/channel_cache.go b/channel_cache.go new file mode 100644 index 00000000..9da1e7c4 --- /dev/null +++ b/channel_cache.go @@ -0,0 +1,477 @@ +package centrifuge + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/centrifugal/centrifuge/internal/timers" +) + +// ChannelCacheOptions is an EXPERIMENTAL way to provide a channelCache layer options to Centrifuge. +// This is very unstable at the moment, do not use in production. +type ChannelCacheOptions struct { + // UseQueue enables queue for incoming publications. This can be useful to reduce PUB/SUB message + // processing time (as we put it into a single cache layer queue instead of each individual connection queue), + // reduce channel broadcast contention (when one channel waits for broadcast of another channel to finish), + // and also opens a road to broadcast tweaks – such as BroadcastDelay and delta between several + // publications (deltas require both BroadcastDelay and KeepLatestPublication to be enabled). + UseQueue bool + // QueueMaxSize is a maximum size of the queue used in channel cache. If zero, 16MB default is used. + // If max size reached, new publications will be dropped. + QueueMaxSize int + + // BroadcastDelay controls delay before Publication broadcast. On time tick Centrifugo broadcasts + // only the latest publication in the channel if any. Useful to reduce/smooth the number of messages sent + // to clients when publication contains the entire state. 
If zero, all publications will be sent to clients + // without delay logic involved on channel cache level. BroadcastDelay option requires (!) UseQueue to be + // enabled, as we can not afford delays during broadcast from the PUB/SUB layer. + BroadcastDelay time.Duration + + // KeepLatestPublication enables keeping latest publication in channel cache layer. This is required + // for supporting deltas when BroadcastDelay > 0. + // Probably it may be used for fast recovery also, but need to consider edge cases for races. + KeepLatestPublication bool + + // PositionSync when true delegates connection position checks to the channel cache. In that case check + // is only performed no more often than PositionSyncInterval thus reducing the load on broker in cases when + // channel has many subscribers. When message loss is detected cache layer tells caller about this and also + // marks all channel subscribers with insufficient state flag. By default, cache is not used for sync – in + // that case each individual connection syncs position independently. + // TODO: introduce Node method to sync positions and call it from Client. + PositionSync bool + // PositionSyncInterval is a period of time between position sync checks. If zero, Centrifuge uses + // Config.ClientChannelPositionCheckDelay as a default value. + PositionSyncInterval time.Duration +} + +// channelCache is an optional intermediary layer between broker PUB/SUB and client connections. +// It comes with memory overhead depending on ChannelCacheOptions used, and may consume one additional +// goroutine per channel if ChannelCacheOptions.UseQueue used. At the same time it can provide significant +// benefits in terms of overall system efficiency. +// +// channelCache is initialized when first subscriber comes into channel, and dropped as soon as last subscriber +// leaves the channel on the node. 
This generally makes it possible to keep latest publication without TTL, but +// probably we still need to handle TTL to match broker behaviour. +type channelCache struct { + initialized atomic.Int64 + channel string + node node + options ChannelCacheOptions + + mu sync.RWMutex + + messages *cacheQueue + broadcastMu sync.Mutex // When queue is not used need to protect broadcast method from concurrent execution. + + closeCh chan struct{} + + // latestPublication is an initial publication in channel or publication last sent to connections. + latestPublication *Publication + // latestStreamPosition is an initial stream position or stream position lastly sent. + latestStreamPosition StreamPosition + // latestQueuedStreamPosition is a stream position of the latest queued publication. + latestQueuedStreamPosition StreamPosition + + positionCheckTime int64 + nowTimeGetter func() time.Time +} + +type node interface { + handlePublication( + channel string, pub *Publication, sp StreamPosition, delta bool, + prevPublication *Publication, bypassOffset bool, + ) error + streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) +} + +func newChannelCache( + channel string, + node node, + options ChannelCacheOptions, +) (*channelCache, error) { + c := &channelCache{ + channel: channel, + node: node, + options: options, + closeCh: make(chan struct{}), + nowTimeGetter: func() time.Time { + return time.Now() + }, + positionCheckTime: time.Now().Unix(), + } + if options.UseQueue { + c.messages = newCacheQueue(2) + } + if options.BroadcastDelay > 0 && !options.UseQueue { + return nil, fmt.Errorf("broadcast delay can only be used with queue enabled") + } + return c, nil +} + +type queuedPub struct { + pub *Publication + sp StreamPosition + delta bool + prevPub *Publication + isInsufficientState bool +} + +func (c *channelCache) InitState(latestPublication *Publication, currentStreamPosition StreamPosition) { + if c.options.KeepLatestPublication { 
+ c.latestPublication = latestPublication + } + c.latestStreamPosition = currentStreamPosition + c.latestQueuedStreamPosition = currentStreamPosition + if c.options.UseQueue { + go c.writer() + } + c.initialized.Store(1) +} + +func (c *channelCache) processPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { + if c.initialized.Load() == 0 { + // Skip publications while cache is not initialized. + return + } + bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} + c.mu.Lock() + c.latestQueuedStreamPosition = sp + c.positionCheckTime = c.nowTimeGetter().Unix() + c.mu.Unlock() + + if c.options.UseQueue { + c.messages.Add(queuedItem{Publication: bp}) + // TODO: do we need to limit queue size here? + } else { + c.broadcastMu.Lock() + defer c.broadcastMu.Unlock() + c.broadcast(bp) + } +} + +func (c *channelCache) processInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { + bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} + c.mu.Lock() + defer c.mu.Unlock() + c.latestQueuedStreamPosition = currentStreamTop + c.positionCheckTime = c.nowTimeGetter().Unix() + if c.options.UseQueue { + // TODO: possibly support c.messages.dropQueued() for this path ? 
+ c.messages.Add(queuedItem{Publication: bp}) + } else { + c.broadcastMu.Lock() + defer c.broadcastMu.Unlock() + c.broadcast(bp) + } +} + +func (c *channelCache) broadcast(qp queuedPub) { + bypassOffset := c.options.BroadcastDelay > 0 && !qp.isInsufficientState + pubToBroadcast := qp.pub + spToBroadcast := qp.sp + if qp.isInsufficientState { + pubToBroadcast = &Publication{ + Offset: math.MaxUint64, + } + spToBroadcast.Offset = math.MaxUint64 + } + + prevPub := qp.prevPub + if c.options.KeepLatestPublication && c.options.BroadcastDelay > 0 { + prevPub = c.latestPublication + } + delta := qp.delta + if c.options.BroadcastDelay > 0 && !c.options.KeepLatestPublication { + delta = false + } + _ = c.node.handlePublication( + c.channel, pubToBroadcast, spToBroadcast, delta, prevPub, bypassOffset) + c.mu.Lock() + defer c.mu.Unlock() + if qp.sp.Offset > c.latestStreamPosition.Offset { + c.latestStreamPosition = qp.sp + if c.options.KeepLatestPublication { + c.latestPublication = qp.pub + } + } +} + +func (c *channelCache) writer() { + for { + if ok := c.waitSendPub(c.options.BroadcastDelay); !ok { + return + } + } +} + +func (c *channelCache) waitSendPub(delay time.Duration) bool { + // Wait for message from the queue. 
+ ok := c.messages.Wait() + if !ok { + return false + } + + if delay > 0 { + tm := timers.AcquireTimer(delay) + select { + case <-tm.C: + case <-c.closeCh: + timers.ReleaseTimer(tm) + return false + } + timers.ReleaseTimer(tm) + } + + msg, ok := c.messages.Remove() + if !ok { + return !c.messages.Closed() + } + if delay == 0 || msg.Publication.isInsufficientState { + c.broadcast(msg.Publication) + return true + } + messageCount := c.messages.Len() + for messageCount > 0 { + messageCount-- + var ok bool + msg, ok = c.messages.Remove() + if !ok { + if c.messages.Closed() { + return false + } + break + } + if msg.Publication.isInsufficientState { + break + } + } + c.broadcast(msg.Publication) + return true +} + +func (c *channelCache) CheckPosition(historyMetaTTL time.Duration) bool { + nowUnixNano := c.nowTimeGetter().UnixNano() + c.mu.RLock() + needCheckPosition := nowUnixNano-c.positionCheckTime >= c.options.PositionSyncInterval.Nanoseconds() + c.mu.RUnlock() + if !needCheckPosition { + return true + } + latestPublication, streamTop, validPosition := c.checkPositionWithRetry(historyMetaTTL) + c.mu.Lock() + c.positionCheckTime = nowUnixNano + c.mu.Unlock() + if !validPosition { + c.processInsufficientState(streamTop, latestPublication) + } + return validPosition +} + +func (c *channelCache) checkPositionWithRetry(historyMetaTTL time.Duration) (*Publication, StreamPosition, bool) { + latestPub, sp, validPosition := c.checkPositionOnce(historyMetaTTL) + if !validPosition { + return c.checkPositionOnce(historyMetaTTL) + } + return latestPub, sp, true +} + +func (c *channelCache) checkPositionOnce(historyMetaTTL time.Duration) (*Publication, StreamPosition, bool) { + latestPublication, currentStreamPosition, err := c.node.streamTopLatestPub(c.channel, historyMetaTTL) + if err != nil { + // Will result into position check later. 
+ return nil, StreamPosition{}, true + } + c.mu.Lock() + defer c.mu.Unlock() + position := c.latestQueuedStreamPosition + isValidPosition := currentStreamPosition.Epoch == position.Epoch && position.Offset == currentStreamPosition.Offset + return latestPublication, currentStreamPosition, isValidPosition +} + +func (c *channelCache) close() { + close(c.closeCh) +} + +type queuedItem struct { + Publication queuedPub +} + +// cacheQueue is an unbounded queue of queuedItem. +// The queue is goroutine safe. +// Inspired by http://blog.dubbelboer.com/2015/04/25/go-faster-queue.html (MIT) +type cacheQueue struct { + mu sync.RWMutex + cond *sync.Cond + nodes []queuedItem + head int + tail int + cnt int + size int + closed bool + initCap int +} + +// newCacheQueue returns a new queuedItem queue with initial capacity. +func newCacheQueue(initialCapacity int) *cacheQueue { + sq := &cacheQueue{ + initCap: initialCapacity, + nodes: make([]queuedItem, initialCapacity), + } + sq.cond = sync.NewCond(&sq.mu) + return sq +} + +// Mutex must be held when calling. +func (q *cacheQueue) resize(n int) { + nodes := make([]queuedItem, n) + if q.head < q.tail { + copy(nodes, q.nodes[q.head:q.tail]) + } else { + copy(nodes, q.nodes[q.head:]) + copy(nodes[len(q.nodes)-q.head:], q.nodes[:q.tail]) + } + + q.tail = q.cnt % n + q.head = 0 + q.nodes = nodes +} + +// Add an queuedItem to the back of the queue +// will return false if the queue is closed. +// In that case the queuedItem is dropped. +func (q *cacheQueue) Add(i queuedItem) bool { + q.mu.Lock() + if q.closed { + q.mu.Unlock() + return false + } + if q.cnt == len(q.nodes) { + // Also tested a growth rate of 1.5, see: http://stackoverflow.com/questions/2269063/buffer-growth-strategy + // In Go this resulted in a higher memory usage. 
+ q.resize(q.cnt * 2) + } + q.nodes[q.tail] = i + q.tail = (q.tail + 1) % len(q.nodes) + if i.Publication.pub != nil { + q.size += len(i.Publication.pub.Data) + } + q.cnt++ + q.cond.Signal() + q.mu.Unlock() + return true +} + +// Close the queue and discard all entries in the queue +// all goroutines in wait() will return +func (q *cacheQueue) Close() { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.cnt = 0 + q.nodes = nil + q.size = 0 + q.cond.Broadcast() +} + +// CloseRemaining will close the queue and return all entries in the queue. +// All goroutines in wait() will return. +func (q *cacheQueue) CloseRemaining() []queuedItem { + q.mu.Lock() + defer q.mu.Unlock() + if q.closed { + return []queuedItem{} + } + rem := make([]queuedItem, 0, q.cnt) + for q.cnt > 0 { + i := q.nodes[q.head] + q.head = (q.head + 1) % len(q.nodes) + q.cnt-- + rem = append(rem, i) + } + q.closed = true + q.cnt = 0 + q.nodes = nil + q.size = 0 + q.cond.Broadcast() + return rem +} + +// Closed returns true if the queue has been closed +// The call cannot guarantee that the queue hasn't been +// closed while the function returns, so only "true" has a definite meaning. +func (q *cacheQueue) Closed() bool { + q.mu.RLock() + c := q.closed + q.mu.RUnlock() + return c +} + +// Wait for a message to be added. +// If there are items on the queue will return immediately. +// Will return false if the queue is closed. +// Otherwise, returns true. +func (q *cacheQueue) Wait() bool { + q.mu.Lock() + if q.closed { + q.mu.Unlock() + return false + } + if q.cnt != 0 { + q.mu.Unlock() + return true + } + q.cond.Wait() + q.mu.Unlock() + return true +} + +// Remove will remove an queuedItem from the queue. +// If false is returned, it either means 1) there were no items on the queue +// or 2) the queue is closed. 
+func (q *cacheQueue) Remove() (queuedItem, bool) { + q.mu.Lock() + if q.cnt == 0 { + q.mu.Unlock() + return queuedItem{}, false + } + i := q.nodes[q.head] + q.head = (q.head + 1) % len(q.nodes) + q.cnt-- + if i.Publication.pub != nil { + q.size -= len(i.Publication.pub.Data) + } + + if n := len(q.nodes) / 2; n >= q.initCap && q.cnt <= n { + q.resize(n) + } + + q.mu.Unlock() + return i, true +} + +// Cap returns the capacity (without allocations) +func (q *cacheQueue) Cap() int { + q.mu.RLock() + c := cap(q.nodes) + q.mu.RUnlock() + return c +} + +// Len returns the current length of the queue. +func (q *cacheQueue) Len() int { + q.mu.RLock() + l := q.cnt + q.mu.RUnlock() + return l +} + +// Size returns the current size of the queue. +func (q *cacheQueue) Size() int { + q.mu.RLock() + s := q.size + q.mu.RUnlock() + return s +} diff --git a/channel_cache_test.go b/channel_cache_test.go new file mode 100644 index 00000000..9f919763 --- /dev/null +++ b/channel_cache_test.go @@ -0,0 +1,185 @@ +package centrifuge + +import ( + "errors" + "math" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// Helper function to create a channelCache with options. 
+func setupChannelCache(t testing.TB, options ChannelCacheOptions, node node) *channelCache { + t.Helper() + channel := "testChannel" + cache, err := newChannelCache(channel, node, options) + if err != nil { + require.NoError(t, err) + } + return cache +} + +type mockNode struct { + // Store function outputs and any state needed for testing + handlePublicationFunc func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error + streamTopLatestPubFunc func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) +} + +func (m *mockNode) handlePublication(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + if m.handlePublicationFunc != nil { + return m.handlePublicationFunc(channel, pub, sp, delta, prevPublication, bypassOffset) + } + return nil +} + +func (m *mockNode) streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { + if m.streamTopLatestPubFunc != nil { + return m.streamTopLatestPubFunc(ch, historyMetaTTL) + } + return nil, StreamPosition{}, nil +} + +func TestChannelCacheInitialization(t *testing.T) { + options := ChannelCacheOptions{ + UseQueue: true, + KeepLatestPublication: true, + BroadcastDelay: 10 * time.Millisecond, + PositionSyncInterval: 1 * time.Second, + } + cache := setupChannelCache(t, options, &mockNode{}) + + require.NotNil(t, cache) + require.NotNil(t, cache.messages) + require.Equal(t, int64(0), cache.initialized.Load()) + cache.InitState(&Publication{}, StreamPosition{1, "epoch"}) + require.Equal(t, int64(1), cache.initialized.Load()) +} + +func TestChannelCacheHandlePublication(t *testing.T) { + optionSet := []ChannelCacheOptions{ + { + UseQueue: false, + KeepLatestPublication: false, + }, + { + UseQueue: true, + KeepLatestPublication: false, + }, + { + UseQueue: true, + KeepLatestPublication: false, + BroadcastDelay: 10 * time.Millisecond, 
+ }, + { + UseQueue: true, + KeepLatestPublication: true, + BroadcastDelay: 10 * time.Millisecond, + }, + } + + for i, options := range optionSet { + t.Run(strconv.Itoa(i), func(t *testing.T) { + doneCh := make(chan struct{}) + + cache := setupChannelCache(t, options, &mockNode{ + handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + close(doneCh) + return nil + }, + }) + cache.InitState(&Publication{}, StreamPosition{}) + + pub := &Publication{Data: []byte("test data")} + sp := StreamPosition{Offset: 1} + + cache.processPublication(pub, sp, false, nil) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "handlePublicationFunc was not called") + } + }) + } +} + +func TestChannelCacheInsufficientState(t *testing.T) { + options := ChannelCacheOptions{ + UseQueue: true, + KeepLatestPublication: true, + } + doneCh := make(chan struct{}) + cache := setupChannelCache(t, options, &mockNode{ + handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + require.Equal(t, uint64(math.MaxUint64), pub.Offset) + require.Equal(t, uint64(math.MaxUint64), sp.Offset) + require.False(t, bypassOffset) + close(doneCh) + return nil + }, + }) + cache.InitState(&Publication{}, StreamPosition{}) + + // Simulate the behavior when the state is marked as insufficient + cache.processInsufficientState(StreamPosition{Offset: 2}, &Publication{}) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "handlePublicationFunc was not called") + } +} + +func TestChannelCachePositionSync(t *testing.T) { + options := ChannelCacheOptions{ + PositionSyncInterval: 10 * time.Millisecond, + } + doneCh := make(chan struct{}) + var closeOnce sync.Once + cache := setupChannelCache(t, options, &mockNode{ + streamTopLatestPubFunc: func(ch string, historyMetaTTL 
time.Duration) (*Publication, StreamPosition, error) { + closeOnce.Do(func() { + close(doneCh) + }) + return nil, StreamPosition{}, nil + }, + }) + cache.InitState(&Publication{}, StreamPosition{}) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "historyFunc was not called") + } +} + +func TestChannelCachePositionSyncRetry(t *testing.T) { + options := ChannelCacheOptions{ + PositionSyncInterval: 10 * time.Millisecond, + } + doneCh := make(chan struct{}) + var closeOnce sync.Once + numCalls := 0 + cache := setupChannelCache(t, options, &mockNode{ + streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { + if numCalls == 0 { + numCalls++ + return nil, StreamPosition{}, errors.New("boom") + } + closeOnce.Do(func() { + close(doneCh) + }) + return nil, StreamPosition{}, nil + }, + }) + cache.InitState(&Publication{}, StreamPosition{}) + + select { + case <-doneCh: + case <-time.After(5 * time.Second): + require.Fail(t, "historyFunc was not called") + } +} From 32da6b5365518b85a96445a3cfdacd0f8f488365 Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 5 May 2024 14:15:40 +0300 Subject: [PATCH 31/61] channel layer, optional delta negotiation --- _examples/compression_playground/main.go | 7 + channel_cache.go => channel_layer.go | 133 +++++++++--------- ...nel_cache_test.go => channel_layer_test.go | 28 ++-- 3 files changed, 85 insertions(+), 83 deletions(-) rename channel_cache.go => channel_layer.go (74%) rename channel_cache_test.go => channel_layer_test.go (89%) diff --git a/_examples/compression_playground/main.go b/_examples/compression_playground/main.go index 773a587d..e684930e 100644 --- a/_examples/compression_playground/main.go +++ b/_examples/compression_playground/main.go @@ -141,6 +141,13 @@ func main() { log.Println(entry.Message, entry.Fields) }, AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, + //GetChannelLayerOptions: func(channel string) 
(centrifuge.ChannelLayerOptions, bool) { + // return centrifuge.ChannelLayerOptions{ + // //KeepLatestPublication: true, + // //EnableQueue: true, + // //BroadcastDelay: 500 * time.Millisecond, + // }, true + //}, }) if err != nil { log.Fatal(err) diff --git a/channel_cache.go b/channel_layer.go similarity index 74% rename from channel_cache.go rename to channel_layer.go index 9da1e7c4..a2a37ec3 100644 --- a/channel_cache.go +++ b/channel_layer.go @@ -10,24 +10,25 @@ import ( "github.com/centrifugal/centrifuge/internal/timers" ) -// ChannelCacheOptions is an EXPERIMENTAL way to provide a channelCache layer options to Centrifuge. +// ChannelLayerOptions is an EXPERIMENTAL way to provide a channelLayer layer options to Centrifuge. // This is very unstable at the moment, do not use in production. -type ChannelCacheOptions struct { - // UseQueue enables queue for incoming publications. This can be useful to reduce PUB/SUB message - // processing time (as we put it into a single cache layer queue instead of each individual connection queue), - // reduce channel broadcast contention (when one channel waits for broadcast of another channel to finish), +type ChannelLayerOptions struct { + // EnableQueue for incoming publications. This can be useful to reduce PUB/SUB message processing time + // (as we put it into a single cache layer queue instead of each individual connection queue), reduce + // channel broadcast contention (when one channel waits for broadcast of another channel to finish), // and also opens a road to broadcast tweaks – such as BroadcastDelay and delta between several // publications (deltas require both BroadcastDelay and KeepLatestPublication to be enabled). - UseQueue bool + EnableQueue bool // QueueMaxSize is a maximum size of the queue used in channel cache. If zero, 16MB default is used. // If max size reached, new publications will be dropped. QueueMaxSize int - // BroadcastDelay controls delay before Publication broadcast. 
On time tick Centrifugo broadcasts + // BroadcastDelay controls the delay before Publication broadcast. On time tick Centrifugo broadcasts // only the latest publication in the channel if any. Useful to reduce/smooth the number of messages sent // to clients when publication contains the entire state. If zero, all publications will be sent to clients - // without delay logic involved on channel cache level. BroadcastDelay option requires (!) UseQueue to be - // enabled, as we can not afford delays during broadcast from the PUB/SUB layer. + // without delay logic involved on channel cache level. BroadcastDelay option requires (!) EnableQueue to be + // enabled, as we can not afford delays during broadcast from the PUB/SUB layer. BroadcastDelay must not be + // used in channels with positioning/recovery on. BroadcastDelay time.Duration // KeepLatestPublication enables keeping latest publication in channel cache layer. This is required @@ -35,31 +36,29 @@ type ChannelCacheOptions struct { // Probably it may be used for fast recovery also, but need to consider edge cases for races. KeepLatestPublication bool - // PositionSync when true delegates connection position checks to the channel cache. In that case check + // EnablePositionSync when true delegates connection position checks to the channel cache. In that case check // is only performed no more often than PositionSyncInterval thus reducing the load on broker in cases when // channel has many subscribers. When message loss is detected cache layer tells caller about this and also // marks all channel subscribers with insufficient state flag. By default, cache is not used for sync – in // that case each individual connection syncs position independently. - // TODO: introduce Node method to sync positions and call it from Client. - PositionSync bool + EnablePositionSync bool // PositionSyncInterval is a period of time between position sync checks. 
If zero, Centrifuge uses // Config.ClientChannelPositionCheckDelay as a default value. PositionSyncInterval time.Duration } -// channelCache is an optional intermediary layer between broker PUB/SUB and client connections. -// It comes with memory overhead depending on ChannelCacheOptions used, and may consume one additional -// goroutine per channel if ChannelCacheOptions.UseQueue used. At the same time it can provide significant +// channelLayer is an optional intermediary between broker PUB/SUB and client connections. It comes +// with memory overhead depending on ChannelLayerOptions used, and may consume one additional goroutine +// per channel if ChannelLayerOptions.EnableQueue used. At the same time it can provide significant // benefits in terms of overall system efficiency. // -// channelCache is initialized when first subscriber comes into channel, and dropped as soon as last subscriber -// leaves the channel on the node. This generally makes it possible to keep latest publication without TTL, but -// probably we still need to handle TTL to match broker behaviour. -type channelCache struct { +// channelLayer is initialized when first subscriber comes into channel, and dropped as soon as last +// subscriber leaves the channel on the Node. 
+type channelLayer struct { initialized atomic.Int64 channel string node node - options ChannelCacheOptions + options ChannelLayerOptions mu sync.RWMutex @@ -82,17 +81,17 @@ type channelCache struct { type node interface { handlePublication( channel string, pub *Publication, sp StreamPosition, delta bool, - prevPublication *Publication, bypassOffset bool, + prevPublication *Publication, ) error streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) } -func newChannelCache( +func newChannelInterlayer( channel string, node node, - options ChannelCacheOptions, -) (*channelCache, error) { - c := &channelCache{ + options ChannelLayerOptions, +) (*channelLayer, error) { + c := &channelLayer{ channel: channel, node: node, options: options, @@ -102,12 +101,15 @@ func newChannelCache( }, positionCheckTime: time.Now().Unix(), } - if options.UseQueue { + if options.EnableQueue { c.messages = newCacheQueue(2) } - if options.BroadcastDelay > 0 && !options.UseQueue { + if options.BroadcastDelay > 0 && !options.EnableQueue { return nil, fmt.Errorf("broadcast delay can only be used with queue enabled") } + if c.options.EnableQueue { + go c.writer() + } return c, nil } @@ -119,32 +121,18 @@ type queuedPub struct { isInsufficientState bool } -func (c *channelCache) InitState(latestPublication *Publication, currentStreamPosition StreamPosition) { - if c.options.KeepLatestPublication { - c.latestPublication = latestPublication - } - c.latestStreamPosition = currentStreamPosition - c.latestQueuedStreamPosition = currentStreamPosition - if c.options.UseQueue { - go c.writer() - } - c.initialized.Store(1) -} - -func (c *channelCache) processPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { - if c.initialized.Load() == 0 { - // Skip publications while cache is not initialized. 
- return - } +func (c *channelLayer) processPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} c.mu.Lock() c.latestQueuedStreamPosition = sp - c.positionCheckTime = c.nowTimeGetter().Unix() + c.positionCheckTime = c.nowTimeGetter().UnixNano() c.mu.Unlock() - if c.options.UseQueue { + if c.options.EnableQueue { + if c.options.QueueMaxSize > 0 && c.messages.Size() > c.options.QueueMaxSize { + return + } c.messages.Add(queuedItem{Publication: bp}) - // TODO: do we need to limit queue size here? } else { c.broadcastMu.Lock() defer c.broadcastMu.Unlock() @@ -152,13 +140,13 @@ func (c *channelCache) processPublication(pub *Publication, sp StreamPosition, d } } -func (c *channelCache) processInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { +func (c *channelLayer) processInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} c.mu.Lock() defer c.mu.Unlock() c.latestQueuedStreamPosition = currentStreamTop - c.positionCheckTime = c.nowTimeGetter().Unix() - if c.options.UseQueue { + c.positionCheckTime = c.nowTimeGetter().UnixNano() + if c.options.EnableQueue { // TODO: possibly support c.messages.dropQueued() for this path ? 
c.messages.Add(queuedItem{Publication: bp}) } else { @@ -168,8 +156,8 @@ func (c *channelCache) processInsufficientState(currentStreamTop StreamPosition, } } -func (c *channelCache) broadcast(qp queuedPub) { - bypassOffset := c.options.BroadcastDelay > 0 && !qp.isInsufficientState +func (c *channelLayer) broadcast(qp queuedPub) { + //bypassOffset := c.options.BroadcastDelay > 0 && !qp.isInsufficientState pubToBroadcast := qp.pub spToBroadcast := qp.sp if qp.isInsufficientState { @@ -180,26 +168,30 @@ func (c *channelCache) broadcast(qp queuedPub) { } prevPub := qp.prevPub - if c.options.KeepLatestPublication && c.options.BroadcastDelay > 0 { + if qp.delta && c.options.KeepLatestPublication { prevPub = c.latestPublication } delta := qp.delta if c.options.BroadcastDelay > 0 && !c.options.KeepLatestPublication { delta = false } + if qp.isInsufficientState { + delta = false + prevPub = nil + } _ = c.node.handlePublication( - c.channel, pubToBroadcast, spToBroadcast, delta, prevPub, bypassOffset) + c.channel, pubToBroadcast, spToBroadcast, delta, prevPub) c.mu.Lock() defer c.mu.Unlock() if qp.sp.Offset > c.latestStreamPosition.Offset { c.latestStreamPosition = qp.sp - if c.options.KeepLatestPublication { - c.latestPublication = qp.pub - } + } + if c.options.KeepLatestPublication { + c.latestPublication = qp.pub } } -func (c *channelCache) writer() { +func (c *channelLayer) writer() { for { if ok := c.waitSendPub(c.options.BroadcastDelay); !ok { return @@ -207,7 +199,7 @@ func (c *channelCache) writer() { } } -func (c *channelCache) waitSendPub(delay time.Duration) bool { +func (c *channelLayer) waitSendPub(delay time.Duration) bool { // Wait for message from the queue. 
ok := c.messages.Wait() if !ok { @@ -252,7 +244,7 @@ func (c *channelCache) waitSendPub(delay time.Duration) bool { return true } -func (c *channelCache) CheckPosition(historyMetaTTL time.Duration) bool { +func (c *channelLayer) CheckPosition(historyMetaTTL time.Duration, clientPosition StreamPosition) bool { nowUnixNano := c.nowTimeGetter().UnixNano() c.mu.RLock() needCheckPosition := nowUnixNano-c.positionCheckTime >= c.options.PositionSyncInterval.Nanoseconds() @@ -260,7 +252,7 @@ func (c *channelCache) CheckPosition(historyMetaTTL time.Duration) bool { if !needCheckPosition { return true } - latestPublication, streamTop, validPosition := c.checkPositionWithRetry(historyMetaTTL) + latestPublication, streamTop, validPosition := c.checkPositionWithRetry(historyMetaTTL, clientPosition) c.mu.Lock() c.positionCheckTime = nowUnixNano c.mu.Unlock() @@ -270,16 +262,16 @@ func (c *channelCache) CheckPosition(historyMetaTTL time.Duration) bool { return validPosition } -func (c *channelCache) checkPositionWithRetry(historyMetaTTL time.Duration) (*Publication, StreamPosition, bool) { - latestPub, sp, validPosition := c.checkPositionOnce(historyMetaTTL) +func (c *channelLayer) checkPositionWithRetry(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool) { + latestPub, sp, validPosition := c.checkPositionOnce(historyMetaTTL, clientPosition) if !validPosition { - return c.checkPositionOnce(historyMetaTTL) + return c.checkPositionOnce(historyMetaTTL, clientPosition) } return latestPub, sp, true } -func (c *channelCache) checkPositionOnce(historyMetaTTL time.Duration) (*Publication, StreamPosition, bool) { - latestPublication, currentStreamPosition, err := c.node.streamTopLatestPub(c.channel, historyMetaTTL) +func (c *channelLayer) checkPositionOnce(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool) { + latestPublication, streamTop, err := c.node.streamTopLatestPub(c.channel, 
historyMetaTTL) if err != nil { // Will result into position check later. return nil, StreamPosition{}, true @@ -287,11 +279,14 @@ func (c *channelCache) checkPositionOnce(historyMetaTTL time.Duration) (*Publica c.mu.Lock() defer c.mu.Unlock() position := c.latestQueuedStreamPosition - isValidPosition := currentStreamPosition.Epoch == position.Epoch && position.Offset == currentStreamPosition.Offset - return latestPublication, currentStreamPosition, isValidPosition + if position.Offset == 0 { + position = clientPosition + } + isValidPosition := streamTop.Epoch == position.Epoch && position.Offset == streamTop.Offset + return latestPublication, streamTop, isValidPosition } -func (c *channelCache) close() { +func (c *channelLayer) close() { close(c.closeCh) } diff --git a/channel_cache_test.go b/channel_layer_test.go similarity index 89% rename from channel_cache_test.go rename to channel_layer_test.go index 9f919763..0e91f515 100644 --- a/channel_cache_test.go +++ b/channel_layer_test.go @@ -11,11 +11,11 @@ import ( "github.com/stretchr/testify/require" ) -// Helper function to create a channelCache with options. -func setupChannelCache(t testing.TB, options ChannelCacheOptions, node node) *channelCache { +// Helper function to create a channelLayer with options. 
+func setupChannelCache(t testing.TB, options ChannelLayerOptions, node node) *channelLayer { t.Helper() channel := "testChannel" - cache, err := newChannelCache(channel, node, options) + cache, err := newChannelInterlayer(channel, node, options) if err != nil { require.NoError(t, err) } @@ -43,8 +43,8 @@ func (m *mockNode) streamTopLatestPub(ch string, historyMetaTTL time.Duration) ( } func TestChannelCacheInitialization(t *testing.T) { - options := ChannelCacheOptions{ - UseQueue: true, + options := ChannelLayerOptions{ + EnableQueue: true, KeepLatestPublication: true, BroadcastDelay: 10 * time.Millisecond, PositionSyncInterval: 1 * time.Second, @@ -59,22 +59,22 @@ func TestChannelCacheInitialization(t *testing.T) { } func TestChannelCacheHandlePublication(t *testing.T) { - optionSet := []ChannelCacheOptions{ + optionSet := []ChannelLayerOptions{ { - UseQueue: false, + EnableQueue: false, KeepLatestPublication: false, }, { - UseQueue: true, + EnableQueue: true, KeepLatestPublication: false, }, { - UseQueue: true, + EnableQueue: true, KeepLatestPublication: false, BroadcastDelay: 10 * time.Millisecond, }, { - UseQueue: true, + EnableQueue: true, KeepLatestPublication: true, BroadcastDelay: 10 * time.Millisecond, }, @@ -107,8 +107,8 @@ func TestChannelCacheHandlePublication(t *testing.T) { } func TestChannelCacheInsufficientState(t *testing.T) { - options := ChannelCacheOptions{ - UseQueue: true, + options := ChannelLayerOptions{ + EnableQueue: true, KeepLatestPublication: true, } doneCh := make(chan struct{}) @@ -134,7 +134,7 @@ func TestChannelCacheInsufficientState(t *testing.T) { } func TestChannelCachePositionSync(t *testing.T) { - options := ChannelCacheOptions{ + options := ChannelLayerOptions{ PositionSyncInterval: 10 * time.Millisecond, } doneCh := make(chan struct{}) @@ -157,7 +157,7 @@ func TestChannelCachePositionSync(t *testing.T) { } func TestChannelCachePositionSyncRetry(t *testing.T) { - options := ChannelCacheOptions{ + options := 
ChannelLayerOptions{ PositionSyncInterval: 10 * time.Millisecond, } doneCh := make(chan struct{}) From 9a59a69f514a46d8135c1e736c4a3bb2ed861faf Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 5 May 2024 14:15:47 +0300 Subject: [PATCH 32/61] channel layer, optional delta negotiation --- _examples/chat_json/index.html | 4 +- _examples/chat_json/main.go | 49 +++++++----- _examples/go.mod | 2 +- _examples/go.sum | 4 +- channel_layer.go | 127 +++++++++++++++--------------- channel_layer_test.go | 73 ++++++++---------- client.go | 81 +++++++++++++------- client_test.go | 136 ++++++++++++++++----------------- config.go | 9 ++- go.mod | 2 +- go.sum | 4 +- hub.go | 2 + node.go | 89 +++++++++++++++++++++ 13 files changed, 352 insertions(+), 230 deletions(-) diff --git a/_examples/chat_json/index.html b/_examples/chat_json/index.html index 0369eb1e..3293d75e 100644 --- a/_examples/chat_json/index.html +++ b/_examples/chat_json/index.html @@ -224,7 +224,9 @@ // subscribe on channel and bind various event listeners. Actual // subscription request will be sent after client connects to // a server. 
- const sub = centrifuge.newSubscription(channel, {delta: 'fossil'}); + const sub = centrifuge.newSubscription(channel, { + delta: 'fossil', + }); sub.on("publication", handlePublication) .on("join", handleJoin) diff --git a/_examples/chat_json/main.go b/_examples/chat_json/main.go index 0af1f143..bd0f1d7a 100644 --- a/_examples/chat_json/main.go +++ b/_examples/chat_json/main.go @@ -68,9 +68,17 @@ func channelSubscribeAllowed(channel string) bool { func main() { node, _ := centrifuge.New(centrifuge.Config{ - LogLevel: centrifuge.LogLevelInfo, - LogHandler: handleLog, - HistoryMetaTTL: 24 * time.Hour, + LogLevel: centrifuge.LogLevelInfo, + LogHandler: handleLog, + HistoryMetaTTL: 24 * time.Hour, + AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, + GetChannelLayerOptions: func(channel string) (centrifuge.ChannelLayerOptions, bool) { + return centrifuge.ChannelLayerOptions{ + KeepLatestPublication: true, + EnableQueue: true, + BroadcastDelay: time.Second, + }, true + }, }) node.OnConnecting(func(ctx context.Context, e centrifuge.ConnectEvent) (centrifuge.ConnectReply, error) { @@ -160,7 +168,6 @@ func main() { e.Channel, data, centrifuge.WithHistory(300, time.Minute), centrifuge.WithClientInfo(e.ClientInfo), - centrifuge.WithDelta(true), ) cb(centrifuge.PublishReply{Result: &result}, err) @@ -220,22 +227,23 @@ func main() { // } //}() // - //go func() { - // // Publish to channel periodically. - // i := 1 - // for { - // _, err := node.Publish( - // "chat:index", - // []byte(`{"input": "Publish from server `+strconv.Itoa(i)+`"}`), - // centrifuge.WithHistory(300, time.Minute), - // ) - // if err != nil { - // log.Printf("error publishing to channel: %s", err) - // } - // i++ - // time.Sleep(10000 * time.Millisecond) - // } - //}() + go func() { + // Publish to channel periodically. 
+ i := 1 + for { + _, err := node.Publish( + "chat:index", + []byte(`{"input": "Publish from server `+strconv.Itoa(i)+`"}`), + centrifuge.WithHistory(300, time.Minute), + centrifuge.WithDelta(true), + ) + if err != nil { + log.Printf("error publishing to channel: %s", err) + } + i++ + time.Sleep(300 * time.Millisecond) + } + }() mux := http.DefaultServeMux @@ -255,6 +263,7 @@ func main() { WriteTimeout: 10 * time.Second, } + log.Print("Starting server, visit http://localhost:8000") go func() { if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { log.Fatal(err) diff --git a/_examples/go.mod b/_examples/go.mod index 39672dd0..a7c3f09e 100644 --- a/_examples/go.mod +++ b/_examples/go.mod @@ -7,7 +7,7 @@ replace github.com/centrifugal/centrifuge => ../ require ( github.com/FZambia/tarantool v0.2.2 github.com/centrifugal/centrifuge v0.8.2 - github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe + github.com/centrifugal/protocol v0.12.2-0.20240505100351-eb69a0b95e15 github.com/cristalhq/jwt/v5 v5.4.0 github.com/dchest/uniuri v1.2.0 github.com/gin-contrib/sessions v0.0.3 diff --git a/_examples/go.sum b/_examples/go.sum index 85737a09..7d2cfc00 100644 --- a/_examples/go.sum +++ b/_examples/go.sum @@ -14,8 +14,8 @@ github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414 github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe h1:uXsl6MWJZMlk42wfVDOK0z596x9JqmkvAqlWzQ830qU= -github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= +github.com/centrifugal/protocol v0.12.2-0.20240505100351-eb69a0b95e15 h1:JpdbAUIZwdHB0xc9371SXJEqByEDs/eZ93RcWSqFAoo= 
+github.com/centrifugal/protocol v0.12.2-0.20240505100351-eb69a0b95e15/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= diff --git a/channel_layer.go b/channel_layer.go index a2a37ec3..518fbc7e 100644 --- a/channel_layer.go +++ b/channel_layer.go @@ -4,14 +4,18 @@ import ( "fmt" "math" "sync" - "sync/atomic" "time" "github.com/centrifugal/centrifuge/internal/timers" ) -// ChannelLayerOptions is an EXPERIMENTAL way to provide a channelLayer layer options to Centrifuge. -// This is very unstable at the moment, do not use in production. +// ChannelLayerOptions is an EXPERIMENTAL way to enable using a channel layer in Centrifuge. +// Note, channel layer is very unstable at the moment – do not use it in production! +// Channel layer is an optional per-channel intermediary between Broker PUB/SUB and Client connections. +// This intermediary layer may be used for various per-channel tweaks and optimizations. Channel layer +// comes with memory overhead depending on ChannelLayerOptions, and may consume one additional goroutine +// per channel if ChannelLayerOptions.EnableQueue is used. At the same time it can provide significant +// benefits in terms of overall system efficiency and flexibility. type ChannelLayerOptions struct { // EnableQueue for incoming publications. This can be useful to reduce PUB/SUB message processing time // (as we put it into a single cache layer queue instead of each individual connection queue), reduce @@ -19,8 +23,8 @@ type ChannelLayerOptions struct { // and also opens a road to broadcast tweaks – such as BroadcastDelay and delta between several // publications (deltas require both BroadcastDelay and KeepLatestPublication to be enabled). 
EnableQueue bool - // QueueMaxSize is a maximum size of the queue used in channel cache. If zero, 16MB default is used. - // If max size reached, new publications will be dropped. + // QueueMaxSize is a maximum size of the queue used in channel cache (in bytes). If zero, 16MB default + // is used. If max size reached, new publications will be dropped. QueueMaxSize int // BroadcastDelay controls the delay before Publication broadcast. On time tick Centrifugo broadcasts @@ -37,52 +41,40 @@ type ChannelLayerOptions struct { KeepLatestPublication bool // EnablePositionSync when true delegates connection position checks to the channel cache. In that case check - // is only performed no more often than PositionSyncInterval thus reducing the load on broker in cases when - // channel has many subscribers. When message loss is detected cache layer tells caller about this and also - // marks all channel subscribers with insufficient state flag. By default, cache is not used for sync – in - // that case each individual connection syncs position independently. + // is only performed no more often than once in Config.ClientChannelPositionCheckDelay thus reducing the load + // on broker in cases when channel has many subscribers. When message loss is detected cache layer tells caller + // about this and also marks all channel subscribers with insufficient state flag. By default, cache is not used + // for sync – in that case each individual connection syncs position independently. EnablePositionSync bool - // PositionSyncInterval is a period of time between position sync checks. If zero, Centrifuge uses - // Config.ClientChannelPositionCheckDelay as a default value. - PositionSyncInterval time.Duration } -// channelLayer is an optional intermediary between broker PUB/SUB and client connections. It comes -// with memory overhead depending on ChannelLayerOptions used, and may consume one additional goroutine -// per channel if ChannelLayerOptions.EnableQueue used. 
At the same time it can provide significant -// benefits in terms of overall system efficiency. -// +// Keep global to not allocate per-channel. Must be only changed by tests. +var channelLayerTimeNow = time.Now + // channelLayer is initialized when first subscriber comes into channel, and dropped as soon as last // subscriber leaves the channel on the Node. type channelLayer struct { - initialized atomic.Int64 - channel string - node node - options ChannelLayerOptions - - mu sync.RWMutex + channel string + node node + options ChannelLayerOptions + mu sync.RWMutex + closeCh chan struct{} + // optional queue for publications. messages *cacheQueue broadcastMu sync.Mutex // When queue is not used need to protect broadcast method from concurrent execution. - - closeCh chan struct{} - // latestPublication is an initial publication in channel or publication last sent to connections. latestPublication *Publication // latestStreamPosition is an initial stream position or stream position lastly sent. latestStreamPosition StreamPosition // latestQueuedStreamPosition is a stream position of the latest queued publication. latestQueuedStreamPosition StreamPosition - + // positionCheckTime is a time (Unix Nanoseconds) when last position check was performed. 
positionCheckTime int64 - nowTimeGetter func() time.Time } type node interface { - handlePublication( - channel string, pub *Publication, sp StreamPosition, delta bool, - prevPublication *Publication, - ) error + handlePublication(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) } @@ -92,14 +84,11 @@ func newChannelInterlayer( options ChannelLayerOptions, ) (*channelLayer, error) { c := &channelLayer{ - channel: channel, - node: node, - options: options, - closeCh: make(chan struct{}), - nowTimeGetter: func() time.Time { - return time.Now() - }, - positionCheckTime: time.Now().Unix(), + channel: channel, + node: node, + options: options, + closeCh: make(chan struct{}), + positionCheckTime: channelLayerTimeNow().UnixNano(), } if options.EnableQueue { c.messages = newCacheQueue(2) @@ -121,15 +110,21 @@ type queuedPub struct { isInsufficientState bool } -func (c *channelLayer) processPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { +const defaultChannelLayerQueueMaxSize = 16 * 1024 * 1024 + +func (c *channelLayer) broadcastPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} c.mu.Lock() c.latestQueuedStreamPosition = sp - c.positionCheckTime = c.nowTimeGetter().UnixNano() + c.positionCheckTime = channelLayerTimeNow().UnixNano() c.mu.Unlock() if c.options.EnableQueue { - if c.options.QueueMaxSize > 0 && c.messages.Size() > c.options.QueueMaxSize { + queueMaxSize := defaultChannelLayerQueueMaxSize + if c.options.QueueMaxSize > 0 { + queueMaxSize = c.options.QueueMaxSize + } + if c.messages.Size() > queueMaxSize { return } c.messages.Add(queuedItem{Publication: bp}) @@ -140,12 +135,12 @@ func (c *channelLayer) processPublication(pub *Publication, sp StreamPosition, d } } -func (c 
*channelLayer) processInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { +func (c *channelLayer) broadcastInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} c.mu.Lock() - defer c.mu.Unlock() c.latestQueuedStreamPosition = currentStreamTop - c.positionCheckTime = c.nowTimeGetter().UnixNano() + c.positionCheckTime = channelLayerTimeNow().UnixNano() + c.mu.Unlock() if c.options.EnableQueue { // TODO: possibly support c.messages.dropQueued() for this path ? c.messages.Add(queuedItem{Publication: bp}) @@ -157,7 +152,6 @@ func (c *channelLayer) processInsufficientState(currentStreamTop StreamPosition, } func (c *channelLayer) broadcast(qp queuedPub) { - //bypassOffset := c.options.BroadcastDelay > 0 && !qp.isInsufficientState pubToBroadcast := qp.pub spToBroadcast := qp.sp if qp.isInsufficientState { @@ -244,37 +238,40 @@ func (c *channelLayer) waitSendPub(delay time.Duration) bool { return true } -func (c *channelLayer) CheckPosition(historyMetaTTL time.Duration, clientPosition StreamPosition) bool { - nowUnixNano := c.nowTimeGetter().UnixNano() - c.mu.RLock() - needCheckPosition := nowUnixNano-c.positionCheckTime >= c.options.PositionSyncInterval.Nanoseconds() - c.mu.RUnlock() +func (c *channelLayer) CheckPosition(historyMetaTTL time.Duration, clientPosition StreamPosition, checkDelay time.Duration) bool { + nowUnixNano := channelLayerTimeNow().UnixNano() + c.mu.Lock() + needCheckPosition := nowUnixNano-c.positionCheckTime >= checkDelay.Nanoseconds() + if needCheckPosition { + c.positionCheckTime = nowUnixNano + } + c.mu.Unlock() if !needCheckPosition { return true } - latestPublication, streamTop, validPosition := c.checkPositionWithRetry(historyMetaTTL, clientPosition) - c.mu.Lock() - c.positionCheckTime = nowUnixNano - c.mu.Unlock() + latestPublication, streamTop, validPosition, err 
:= c.checkPositionWithRetry(historyMetaTTL, clientPosition) + if err != nil { + // Will be checked later. + return true + } if !validPosition { - c.processInsufficientState(streamTop, latestPublication) + c.broadcastInsufficientState(streamTop, latestPublication) } return validPosition } -func (c *channelLayer) checkPositionWithRetry(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool) { - latestPub, sp, validPosition := c.checkPositionOnce(historyMetaTTL, clientPosition) - if !validPosition { +func (c *channelLayer) checkPositionWithRetry(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool, error) { + latestPub, sp, validPosition, err := c.checkPositionOnce(historyMetaTTL, clientPosition) + if err != nil || !validPosition { return c.checkPositionOnce(historyMetaTTL, clientPosition) } - return latestPub, sp, true + return latestPub, sp, validPosition, err } -func (c *channelLayer) checkPositionOnce(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool) { +func (c *channelLayer) checkPositionOnce(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool, error) { latestPublication, streamTop, err := c.node.streamTopLatestPub(c.channel, historyMetaTTL) if err != nil { - // Will result into position check later. 
- return nil, StreamPosition{}, true + return nil, StreamPosition{}, false, err } c.mu.Lock() defer c.mu.Unlock() @@ -283,7 +280,7 @@ func (c *channelLayer) checkPositionOnce(historyMetaTTL time.Duration, clientPos position = clientPosition } isValidPosition := streamTop.Epoch == position.Epoch && position.Offset == streamTop.Offset - return latestPublication, streamTop, isValidPosition + return latestPublication, streamTop, isValidPosition, nil } func (c *channelLayer) close() { diff --git a/channel_layer_test.go b/channel_layer_test.go index 0e91f515..e9674a2a 100644 --- a/channel_layer_test.go +++ b/channel_layer_test.go @@ -12,7 +12,7 @@ import ( ) // Helper function to create a channelLayer with options. -func setupChannelCache(t testing.TB, options ChannelLayerOptions, node node) *channelLayer { +func setupChannelLayer(t testing.TB, options ChannelLayerOptions, node node) *channelLayer { t.Helper() channel := "testChannel" cache, err := newChannelInterlayer(channel, node, options) @@ -24,13 +24,13 @@ func setupChannelCache(t testing.TB, options ChannelLayerOptions, node node) *ch type mockNode struct { // Store function outputs and any state needed for testing - handlePublicationFunc func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error + handlePublicationFunc func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error streamTopLatestPubFunc func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) } -func (m *mockNode) handlePublication(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { +func (m *mockNode) handlePublication(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error { if m.handlePublicationFunc != nil { - return m.handlePublicationFunc(channel, pub, sp, delta, prevPublication, bypassOffset) 
+ return m.handlePublicationFunc(channel, pub, sp, delta, prevPublication) } return nil } @@ -42,23 +42,7 @@ func (m *mockNode) streamTopLatestPub(ch string, historyMetaTTL time.Duration) ( return nil, StreamPosition{}, nil } -func TestChannelCacheInitialization(t *testing.T) { - options := ChannelLayerOptions{ - EnableQueue: true, - KeepLatestPublication: true, - BroadcastDelay: 10 * time.Millisecond, - PositionSyncInterval: 1 * time.Second, - } - cache := setupChannelCache(t, options, &mockNode{}) - - require.NotNil(t, cache) - require.NotNil(t, cache.messages) - require.Equal(t, int64(0), cache.initialized.Load()) - cache.InitState(&Publication{}, StreamPosition{1, "epoch"}) - require.Equal(t, int64(1), cache.initialized.Load()) -} - -func TestChannelCacheHandlePublication(t *testing.T) { +func TestChannelLayerHandlePublication(t *testing.T) { optionSet := []ChannelLayerOptions{ { EnableQueue: false, @@ -84,18 +68,17 @@ func TestChannelCacheHandlePublication(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) { doneCh := make(chan struct{}) - cache := setupChannelCache(t, options, &mockNode{ - handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + cache := setupChannelLayer(t, options, &mockNode{ + handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error { close(doneCh) return nil }, }) - cache.InitState(&Publication{}, StreamPosition{}) pub := &Publication{Data: []byte("test data")} sp := StreamPosition{Offset: 1} - cache.processPublication(pub, sp, false, nil) + cache.broadcastPublication(pub, sp, false, nil) select { case <-doneCh: @@ -106,25 +89,23 @@ func TestChannelCacheHandlePublication(t *testing.T) { } } -func TestChannelCacheInsufficientState(t *testing.T) { +func TestChannelLayerInsufficientState(t *testing.T) { options := ChannelLayerOptions{ EnableQueue: true, 
KeepLatestPublication: true, } doneCh := make(chan struct{}) - cache := setupChannelCache(t, options, &mockNode{ - handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication, bypassOffset bool) error { + cache := setupChannelLayer(t, options, &mockNode{ + handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error { require.Equal(t, uint64(math.MaxUint64), pub.Offset) require.Equal(t, uint64(math.MaxUint64), sp.Offset) - require.False(t, bypassOffset) close(doneCh) return nil }, }) - cache.InitState(&Publication{}, StreamPosition{}) // Simulate the behavior when the state is marked as insufficient - cache.processInsufficientState(StreamPosition{Offset: 2}, &Publication{}) + cache.broadcastInsufficientState(StreamPosition{Offset: 2}, &Publication{}) select { case <-doneCh: @@ -133,13 +114,13 @@ func TestChannelCacheInsufficientState(t *testing.T) { } } -func TestChannelCachePositionSync(t *testing.T) { +func TestChannelLayerPositionSync(t *testing.T) { options := ChannelLayerOptions{ - PositionSyncInterval: 10 * time.Millisecond, + EnablePositionSync: true, } doneCh := make(chan struct{}) var closeOnce sync.Once - cache := setupChannelCache(t, options, &mockNode{ + layer := setupChannelLayer(t, options, &mockNode{ streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { closeOnce.Do(func() { close(doneCh) @@ -147,8 +128,12 @@ func TestChannelCachePositionSync(t *testing.T) { return nil, StreamPosition{}, nil }, }) - cache.InitState(&Publication{}, StreamPosition{}) - + originalGetter := channelLayerTimeNow + channelLayerTimeNow = func() time.Time { + return time.Now().Add(time.Hour) + } + layer.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second) + channelLayerTimeNow = originalGetter select { case <-doneCh: case <-time.After(5 * time.Second): @@ -156,14 
+141,14 @@ func TestChannelCachePositionSync(t *testing.T) { } } -func TestChannelCachePositionSyncRetry(t *testing.T) { +func TestChannelLayerPositionSyncRetry(t *testing.T) { options := ChannelLayerOptions{ - PositionSyncInterval: 10 * time.Millisecond, + EnablePositionSync: true, } doneCh := make(chan struct{}) var closeOnce sync.Once numCalls := 0 - cache := setupChannelCache(t, options, &mockNode{ + layer := setupChannelLayer(t, options, &mockNode{ streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { if numCalls == 0 { numCalls++ @@ -175,11 +160,15 @@ func TestChannelCachePositionSyncRetry(t *testing.T) { return nil, StreamPosition{}, nil }, }) - cache.InitState(&Publication{}, StreamPosition{}) - + originalGetter := channelLayerTimeNow + channelLayerTimeNow = func() time.Time { + return time.Now().Add(time.Hour) + } + layer.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second) + channelLayerTimeNow = originalGetter select { case <-doneCh: case <-time.After(5 * time.Second): - require.Fail(t, "historyFunc was not called") + require.Fail(t, "streamTopLatestPubFunc was not called") } } diff --git a/client.go b/client.go index d41065c6..aa1e7465 100644 --- a/client.go +++ b/client.go @@ -4,12 +4,13 @@ import ( "context" "errors" "fmt" - "github.com/centrifugal/centrifuge/internal/convert" "io" + "math" "slices" "sync" "time" + "github.com/centrifugal/centrifuge/internal/convert" "github.com/centrifugal/centrifuge/internal/queue" "github.com/centrifugal/centrifuge/internal/recovery" "github.com/centrifugal/centrifuge/internal/saferand" @@ -740,16 +741,6 @@ func (c *Client) checkPosition(checkDelay time.Duration, ch string, chCtx Channe historyMetaTTL = time.Duration(chCtx.metaTTLSeconds) * time.Second } - streamTop, err := c.node.streamTop(ch, historyMetaTTL) - if err != nil { - // Check later. 
- return true - } - - return c.isValidPosition(streamTop, nowUnix, ch) -} - -func (c *Client) isValidPosition(streamTop StreamPosition, nowUnix int64, ch string) bool { c.mu.Lock() if c.status == statusClosed { c.mu.Unlock() @@ -763,18 +754,20 @@ func (c *Client) isValidPosition(streamTop StreamPosition, nowUnix int64, ch str position := chCtx.streamPosition c.mu.Unlock() - isValidPosition := streamTop.Epoch == position.Epoch && position.Offset >= streamTop.Offset - if isValidPosition { + validPosition, err := c.node.checkPosition(ch, position, historyMetaTTL) + if err != nil { + // Check later. + return true + } + if validPosition { c.mu.Lock() if chContext, ok := c.channels[ch]; ok { chContext.positionCheckTime = nowUnix c.channels[ch] = chContext } c.mu.Unlock() - return true } - - return false + return validPosition } // ID returns unique client connection id. @@ -1618,13 +1611,10 @@ func (c *Client) handleSubscribe(req *protocol.SubscribeRequest, cmd *protocol.C } if req.Delta != "" { - dt, ok := stringToDeltaType[req.Delta] + _, ok := stringToDeltaType[req.Delta] if !ok { return c.logDisconnectBadRequest("unknown delta type in subscribe request: " + req.Delta) } - if !slices.Contains(c.node.config.AllowedDeltaTypes, dt) { - return c.logDisconnectBadRequest("disabled delta type in subscribe request: " + req.Delta) - } } replyError, disconnect := c.validateSubscribeRequest(req) @@ -2763,9 +2753,11 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep sub := subInfo{client: c, deltaType: ""} if req.Delta != "" { - if dt, deltaFound := stringToDeltaType[req.Delta]; deltaFound { - sub.deltaType = dt + dt := DeltaType(req.Delta) + if slices.Contains(c.node.config.AllowedDeltaTypes, dt) { + res.Delta = true } + sub.deltaType = dt } err := c.node.addSubscription(channel, sub) if err != nil { @@ -2919,7 +2911,7 @@ func (c *Client) subscribeCmd(req *protocol.SubscribeRequest, reply SubscribeRep if res.Recovered { // Only append recovered 
publications in case continuity in a channel can be achieved. - if req.Delta == string(DeltaTypeFossil) { + if res.Delta && req.Delta == string(DeltaTypeFossil) { res.Publications = c.makeRecoveredPubsDeltaFossil(recoveredPubs) // Allow delta for the following real-time publications since recovery is successful // and makeRecoveredPubsDeltaFossil already created publication with base data if required. @@ -3101,15 +3093,30 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica c.mu.Unlock() return nil } + deltaAllowed := channelHasFlag(channelContext.flags, flagDeltaAllowed) if !channelHasFlag(channelContext.flags, flagPositioning) { if hasFlag(c.transport.DisabledPushFlags(), PushFlagPublication) { c.mu.Unlock() return nil } c.mu.Unlock() + if pub.Offset == math.MaxUint64 { + // This is a special pub to trigger insufficient state. + return nil + } + if data.delta && deltaAllowed { + return c.transportEnqueue(data.deltaData, ch, protocol.FrameTypePushPublication) + } + if !deltaAllowed { + c.mu.Lock() + if chCtx, chCtxOK := c.channels[ch]; chCtxOK { + chCtx.flags |= flagDeltaAllowed + c.channels[ch] = chCtx + } + c.mu.Unlock() + } return c.transportEnqueue(data.data, ch, protocol.FrameTypePushPublication) } - deltaAllowed := channelHasFlag(channelContext.flags, flagDeltaAllowed) serverSide := channelHasFlag(channelContext.flags, flagServerSide) currentPositionOffset := channelContext.streamPosition.Offset nextExpectedOffset := currentPositionOffset + 1 @@ -3140,7 +3147,7 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica if hasFlag(c.transport.DisabledPushFlags(), PushFlagPublication) { return nil } - if deltaAllowed { + if data.delta && deltaAllowed { return c.transportEnqueue(data.deltaData, ch, protocol.FrameTypePushPublication) } if !deltaAllowed { @@ -3155,7 +3162,7 @@ func (c *Client) writePublicationUpdatePosition(ch string, pub *protocol.Publica } func (c *Client) writePublicationNoDelta(ch 
string, pub *protocol.Publication, data []byte, sp StreamPosition) error { - return c.writePublication(ch, pub, dataValue{data: data, deltaData: data}, sp) + return c.writePublication(ch, pub, dataValue{data: data, deltaData: nil, delta: false}, sp) } func (c *Client) writePublication(ch string, pub *protocol.Publication, data dataValue, sp StreamPosition) error { @@ -3166,6 +3173,28 @@ func (c *Client) writePublication(ch string, pub *protocol.Publication, data dat if hasFlag(c.transport.DisabledPushFlags(), PushFlagPublication) { return nil } + + if data.delta { + c.mu.RLock() + channelContext, ok := c.channels[ch] + if !ok { + c.mu.RUnlock() + return nil + } + deltaAllowed := channelHasFlag(channelContext.flags, flagDeltaAllowed) + c.mu.RUnlock() + + if deltaAllowed { + return c.transportEnqueue(data.deltaData, ch, protocol.FrameTypePushPublication) + } else { + c.mu.Lock() + if chCtx, chCtxOK := c.channels[ch]; chCtxOK { + chCtx.flags |= flagDeltaAllowed + c.channels[ch] = chCtx + } + c.mu.Unlock() + } + } return c.transportEnqueue(data.data, ch, protocol.FrameTypePushPublication) } c.pubSubSync.SyncPublication(ch, pub, func() { diff --git a/client_test.go b/client_test.go index dda50799..ee54cf6c 100644 --- a/client_test.go +++ b/client_test.go @@ -3042,74 +3042,74 @@ func TestClientCheckPosition(t *testing.T) { require.True(t, got) } -func TestClientIsValidPosition(t *testing.T) { - node := defaultTestNode() - defer func() { _ = node.Shutdown(context.Background()) }() - - client := newTestClient(t, node, "42") - - node.mu.Lock() - node.nowTimeGetter = func() time.Time { - return time.Unix(200, 0) - } - node.mu.Unlock() - - client.channels = map[string]ChannelContext{ - "example": { - flags: flagSubscribed, - positionCheckTime: 50, - streamPosition: StreamPosition{ - Offset: 20, - Epoch: "test", - }, - }, - } - - got := client.isValidPosition(StreamPosition{ - Offset: 20, - Epoch: "test", - }, 200, "example") - require.True(t, got) - require.Equal(t, 
int64(200), client.channels["example"].positionCheckTime) - - got = client.isValidPosition(StreamPosition{ - Offset: 19, - Epoch: "test", - }, 210, "example") - require.True(t, got) - require.Equal(t, int64(210), client.channels["example"].positionCheckTime) - - got = client.isValidPosition(StreamPosition{ - Offset: 21, - Epoch: "test", - }, 220, "example") - require.False(t, got) - require.Equal(t, int64(210), client.channels["example"].positionCheckTime) - - client.channels = map[string]ChannelContext{ - "example": { - positionCheckTime: 50, - streamPosition: StreamPosition{ - Offset: 20, - Epoch: "test", - }, - }, - } - // no subscribed flag. - got = client.isValidPosition(StreamPosition{ - Offset: 21, - Epoch: "test", - }, 220, "example") - require.True(t, got) - - _ = client.close(DisconnectConnectionClosed) - // closed client. - got = client.isValidPosition(StreamPosition{ - Offset: 21, - Epoch: "test", - }, 220, "example") - require.True(t, got) -} +//func TestClientIsValidPosition(t *testing.T) { +// node := defaultTestNode() +// defer func() { _ = node.Shutdown(context.Background()) }() +// +// client := newTestClient(t, node, "42") +// +// node.mu.Lock() +// node.nowTimeGetter = func() time.Time { +// return time.Unix(200, 0) +// } +// node.mu.Unlock() +// +// client.channels = map[string]ChannelContext{ +// "example": { +// flags: flagSubscribed, +// positionCheckTime: 50, +// streamPosition: StreamPosition{ +// Offset: 20, +// Epoch: "test", +// }, +// }, +// } +// +// got := client.isValidPosition(StreamPosition{ +// Offset: 20, +// Epoch: "test", +// }, 200, "example") +// require.True(t, got) +// require.Equal(t, int64(200), client.channels["example"].positionCheckTime) +// +// got = client.isValidPosition(StreamPosition{ +// Offset: 19, +// Epoch: "test", +// }, 210, "example") +// require.True(t, got) +// require.Equal(t, int64(210), client.channels["example"].positionCheckTime) +// +// got = client.isValidPosition(StreamPosition{ +// Offset: 21, 
+// Epoch: "test", +// }, 220, "example") +// require.False(t, got) +// require.Equal(t, int64(210), client.channels["example"].positionCheckTime) +// +// client.channels = map[string]ChannelContext{ +// "example": { +// positionCheckTime: 50, +// streamPosition: StreamPosition{ +// Offset: 20, +// Epoch: "test", +// }, +// }, +// } +// // no subscribed flag. +// got = client.isValidPosition(StreamPosition{ +// Offset: 21, +// Epoch: "test", +// }, 220, "example") +// require.True(t, got) +// +// _ = client.close(DisconnectConnectionClosed) +// // closed client. +// got = client.isValidPosition(StreamPosition{ +// Offset: 21, +// Epoch: "test", +// }, 220, "example") +// require.True(t, got) +//} func TestErrLogLevel(t *testing.T) { require.Equal(t, LogLevelInfo, errLogLevel(ErrorNotAvailable)) diff --git a/config.go b/config.go index 6a5a888c..ae252980 100644 --- a/config.go +++ b/config.go @@ -110,9 +110,14 @@ type Config struct { ChannelNamespaceLabelForTransportMessagesReceived bool // AllowedDeltaTypes is a whitelist of DeltaType subscribers can use. At this point Centrifuge - // only supports DeltaTypeFossil. If zero value – clients won't be able to negotiate delta encoding. - // Delta encoding is an EXPERIMENTAL feature and may be changed/removed. + // only supports DeltaTypeFossil. If zero value – clients won't be able to negotiate delta encoding + // and will receive full data in publications. + // Delta encoding is an EXPERIMENTAL feature and may be changed. AllowedDeltaTypes []DeltaType + + // GetChannelLayerOptions is a way to provide ChannelLayerOptions for channel. + // See the doc comment for ChannelLayerOptions. 
+ GetChannelLayerOptions func(channel string) (ChannelLayerOptions, bool) } const ( diff --git a/go.mod b/go.mod index e273a08d..e9ec7394 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/FZambia/eagle v0.1.0 - github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe + github.com/centrifugal/protocol v0.12.2-0.20240505100351-eb69a0b95e15 github.com/google/uuid v1.6.0 github.com/prometheus/client_golang v1.19.0 github.com/redis/rueidis v1.0.33 diff --git a/go.sum b/go.sum index f5643007..a48a01db 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/FZambia/eagle v0.1.0 h1:9gyX6x+xjoIfglgyPTcYm7dvY7FJ93us1QY5De4CyXA= github.com/FZambia/eagle v0.1.0/go.mod h1:YjGSPVkQTNcVLfzEUQJNgW9ScPR0K4u/Ky0yeFa4oDA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe h1:uXsl6MWJZMlk42wfVDOK0z596x9JqmkvAqlWzQ830qU= -github.com/centrifugal/protocol v0.12.2-0.20240429145950-b906e73562fe/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= +github.com/centrifugal/protocol v0.12.2-0.20240505100351-eb69a0b95e15 h1:JpdbAUIZwdHB0xc9371SXJEqByEDs/eZ93RcWSqFAoo= +github.com/centrifugal/protocol v0.12.2-0.20240505100351-eb69a0b95e15/go.mod h1:lM54PGU/u5WupYSb755Zv6tZ2ju1SqNKCp6A4s0DeG4= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/hub.go b/hub.go index 7d77fe11..d4db26d9 100644 --- a/hub.go +++ b/hub.go @@ -568,6 +568,7 @@ type broadcastKey struct { type dataValue struct { data []byte deltaData []byte + delta bool } // broadcastPublicationDelta sends message to all clients subscribed on channel trying to use deltas. 
@@ -722,6 +723,7 @@ func (h *subShard) broadcastPublicationDelta(channel string, pub *Publication, p go func(c *Client) { c.Disconnect(DisconnectInappropriateProtocol) }(sub.client) continue } + value.delta = true _ = sub.client.writePublication(channel, fullPub, value, sp) } if jsonEncodeErr != nil && h.logger.enabled(LogLevelWarn) { diff --git a/node.go b/node.go index fb35081e..a82850a7 100644 --- a/node.go +++ b/node.go @@ -83,6 +83,8 @@ type Node struct { nodeInfoSendHandler NodeInfoSendHandler emulationSurveyHandler *emulationSurveyHandler + + layers map[string]*channelLayer } const ( @@ -162,6 +164,7 @@ func New(c Config) (*Node, error) { subDissolver: dissolve.New(numSubDissolverWorkers), nowTimeGetter: nowtime.Get, surveyRegistry: make(map[uint64]chan survey), + layers: map[string]*channelLayer{}, } n.emulationSurveyHandler = newEmulationSurveyHandler(n) @@ -680,6 +683,18 @@ func (n *Node) handleControl(data []byte) error { return nil } +func (n *Node) handlePublicationCached(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { + mu := n.subLock(ch) + mu.Lock() + cache, ok := n.layers[ch] + mu.Unlock() + if ok { + cache.broadcastPublication(pub, sp, delta, prevPub) + return nil + } + return n.handlePublication(ch, pub, sp, delta, prevPub) +} + // handlePublication handles messages published into channel and // coming from Broker. The goal of method is to deliver this message // to all clients on this node currently subscribed to channel. 
@@ -988,9 +1003,27 @@ func (n *Node) addSubscription(ch string, sub subInfo) error { return err } if first { + if n.config.GetChannelLayerOptions != nil { + cacheOpts, ok := n.config.GetChannelLayerOptions(ch) + if ok { + layer, err := newChannelInterlayer(ch, n, cacheOpts) + if err != nil { + return err + } + n.layers[ch] = layer + } + } + err := n.broker.Subscribe(ch) if err != nil { _, _ = n.hub.removeSub(ch, sub.client) + if n.config.GetChannelLayerOptions != nil { + layer, ok := n.layers[ch] + if ok { + layer.close() + delete(n.layers, ch) + } + } return err } } @@ -1024,6 +1057,12 @@ func (n *Node) removeSubscription(ch string, c *Client) error { if err != nil { // Cool down a bit since broker is not ready to process unsubscription. time.Sleep(500 * time.Millisecond) + } else { + cache, ok := n.layers[ch] + if ok { + cache.close() + delete(n.layers, ch) + } } return err } @@ -1377,6 +1416,53 @@ func (n *Node) streamTop(ch string, historyMetaTTL time.Duration) (StreamPositio return historyResult.StreamPosition, nil } +//func (c *Client) isValidPosition(streamTop StreamPosition, nowUnix int64, ch string) bool { +// c.mu.Lock() +// if c.status == statusClosed { +// c.mu.Unlock() +// return true +// } +// chCtx, ok := c.channels[ch] +// if !ok || !channelHasFlag(chCtx.flags, flagSubscribed) { +// c.mu.Unlock() +// return true +// } +// position := chCtx.streamPosition +// c.mu.Unlock() +// +// isValidPosition := streamTop.Epoch == position.Epoch && position.Offset >= streamTop.Offset +// if isValidPosition { +// c.mu.Lock() +// if chContext, ok := c.channels[ch]; ok { +// chContext.positionCheckTime = nowUnix +// c.channels[ch] = chContext +// } +// c.mu.Unlock() +// return true +// } +// +// return false +//} + +func (n *Node) checkPosition(ch string, position StreamPosition, historyMetaTTL time.Duration) (bool, error) { + n.metrics.incActionCount("add_subscription") + mu := n.subLock(ch) + mu.Lock() + cache, ok := n.layers[ch] + mu.Unlock() + if !ok || 
!cache.options.EnablePositionSync { + // No interlayer for channel or position sync disabled – we then check position over Broker. + streamTop, err := n.streamTop(ch, historyMetaTTL) + if err != nil { + // Will be checked later. + return false, err + } + return streamTop.Epoch == position.Epoch && position.Offset == streamTop.Offset, nil + } + validPosition := cache.CheckPosition(historyMetaTTL, position, n.config.ClientChannelPositionCheckDelay) + return validPosition, nil +} + // RemoveHistory removes channel history. func (n *Node) RemoveHistory(ch string) error { n.metrics.incActionCount("history_remove") @@ -1560,6 +1646,9 @@ func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp S if pub == nil { panic("nil Publication received, this must never happen") } + if h.node.config.GetChannelLayerOptions != nil { + return h.node.handlePublicationCached(ch, pub, sp, delta, prevPub) + } return h.node.handlePublication(ch, pub, sp, delta, prevPub) } From b80c43fe8f0363948e75104ada1949950f71e580 Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 5 May 2024 15:05:55 +0300 Subject: [PATCH 33/61] minor cleanups --- channel_layer.go | 64 +++++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 31 deletions(-) diff --git a/channel_layer.go b/channel_layer.go index 518fbc7e..b807d413 100644 --- a/channel_layer.go +++ b/channel_layer.go @@ -48,7 +48,7 @@ type ChannelLayerOptions struct { EnablePositionSync bool } -// Keep global to not allocate per-channel. Must be only changed by tests. +// Keep global to save 8 byte per-channel. Must be only changed by tests. var channelLayerTimeNow = time.Now // channelLayer is initialized when first subscriber comes into channel, and dropped as soon as last @@ -61,8 +61,10 @@ type channelLayer struct { mu sync.RWMutex closeCh chan struct{} // optional queue for publications. 
- messages *cacheQueue - broadcastMu sync.Mutex // When queue is not used need to protect broadcast method from concurrent execution. + messages *publicationQueue + // We must synchronize broadcast method between general publications and insufficient state notifications. + // Only used when queue is disabled. + broadcastMu sync.Mutex // latestPublication is an initial publication in channel or publication last sent to connections. latestPublication *Publication // latestStreamPosition is an initial stream position or stream position lastly sent. @@ -91,7 +93,7 @@ func newChannelInterlayer( positionCheckTime: channelLayerTimeNow().UnixNano(), } if options.EnableQueue { - c.messages = newCacheQueue(2) + c.messages = newPublicationQueue(2) } if options.BroadcastDelay > 0 && !options.EnableQueue { return nil, fmt.Errorf("broadcast delay can only be used with queue enabled") @@ -127,7 +129,7 @@ func (c *channelLayer) broadcastPublication(pub *Publication, sp StreamPosition, if c.messages.Size() > queueMaxSize { return } - c.messages.Add(queuedItem{Publication: bp}) + c.messages.Add(queuedPublication{Publication: bp}) } else { c.broadcastMu.Lock() defer c.broadcastMu.Unlock() @@ -143,7 +145,7 @@ func (c *channelLayer) broadcastInsufficientState(currentStreamTop StreamPositio c.mu.Unlock() if c.options.EnableQueue { // TODO: possibly support c.messages.dropQueued() for this path ? - c.messages.Add(queuedItem{Publication: bp}) + c.messages.Add(queuedPublication{Publication: bp}) } else { c.broadcastMu.Lock() defer c.broadcastMu.Unlock() @@ -287,17 +289,17 @@ func (c *channelLayer) close() { close(c.closeCh) } -type queuedItem struct { +type queuedPublication struct { Publication queuedPub } -// cacheQueue is an unbounded queue of queuedItem. +// publicationQueue is an unbounded queue of queuedPublication. // The queue is goroutine safe. 
// Inspired by http://blog.dubbelboer.com/2015/04/25/go-faster-queue.html (MIT) -type cacheQueue struct { +type publicationQueue struct { mu sync.RWMutex cond *sync.Cond - nodes []queuedItem + nodes []queuedPublication head int tail int cnt int @@ -306,19 +308,19 @@ type cacheQueue struct { initCap int } -// newCacheQueue returns a new queuedItem queue with initial capacity. -func newCacheQueue(initialCapacity int) *cacheQueue { - sq := &cacheQueue{ +// newPublicationQueue returns a new queuedPublication queue with initial capacity. +func newPublicationQueue(initialCapacity int) *publicationQueue { + sq := &publicationQueue{ initCap: initialCapacity, - nodes: make([]queuedItem, initialCapacity), + nodes: make([]queuedPublication, initialCapacity), } sq.cond = sync.NewCond(&sq.mu) return sq } // Mutex must be held when calling. -func (q *cacheQueue) resize(n int) { - nodes := make([]queuedItem, n) +func (q *publicationQueue) resize(n int) { + nodes := make([]queuedPublication, n) if q.head < q.tail { copy(nodes, q.nodes[q.head:q.tail]) } else { @@ -331,10 +333,10 @@ func (q *cacheQueue) resize(n int) { q.nodes = nodes } -// Add an queuedItem to the back of the queue +// Add an queuedPublication to the back of the queue // will return false if the queue is closed. -// In that case the queuedItem is dropped. -func (q *cacheQueue) Add(i queuedItem) bool { +// In that case the queuedPublication is dropped. +func (q *publicationQueue) Add(i queuedPublication) bool { q.mu.Lock() if q.closed { q.mu.Unlock() @@ -358,7 +360,7 @@ func (q *cacheQueue) Add(i queuedItem) bool { // Close the queue and discard all entries in the queue // all goroutines in wait() will return -func (q *cacheQueue) Close() { +func (q *publicationQueue) Close() { q.mu.Lock() defer q.mu.Unlock() q.closed = true @@ -370,13 +372,13 @@ func (q *cacheQueue) Close() { // CloseRemaining will close the queue and return all entries in the queue. // All goroutines in wait() will return. 
-func (q *cacheQueue) CloseRemaining() []queuedItem { +func (q *publicationQueue) CloseRemaining() []queuedPublication { q.mu.Lock() defer q.mu.Unlock() if q.closed { - return []queuedItem{} + return []queuedPublication{} } - rem := make([]queuedItem, 0, q.cnt) + rem := make([]queuedPublication, 0, q.cnt) for q.cnt > 0 { i := q.nodes[q.head] q.head = (q.head + 1) % len(q.nodes) @@ -394,7 +396,7 @@ func (q *cacheQueue) CloseRemaining() []queuedItem { // Closed returns true if the queue has been closed // The call cannot guarantee that the queue hasn't been // closed while the function returns, so only "true" has a definite meaning. -func (q *cacheQueue) Closed() bool { +func (q *publicationQueue) Closed() bool { q.mu.RLock() c := q.closed q.mu.RUnlock() @@ -405,7 +407,7 @@ func (q *cacheQueue) Closed() bool { // If there are items on the queue will return immediately. // Will return false if the queue is closed. // Otherwise, returns true. -func (q *cacheQueue) Wait() bool { +func (q *publicationQueue) Wait() bool { q.mu.Lock() if q.closed { q.mu.Unlock() @@ -420,14 +422,14 @@ func (q *cacheQueue) Wait() bool { return true } -// Remove will remove an queuedItem from the queue. +// Remove will remove an queuedPublication from the queue. // If false is returned, it either means 1) there were no items on the queue // or 2) the queue is closed. -func (q *cacheQueue) Remove() (queuedItem, bool) { +func (q *publicationQueue) Remove() (queuedPublication, bool) { q.mu.Lock() if q.cnt == 0 { q.mu.Unlock() - return queuedItem{}, false + return queuedPublication{}, false } i := q.nodes[q.head] q.head = (q.head + 1) % len(q.nodes) @@ -445,7 +447,7 @@ func (q *cacheQueue) Remove() (queuedItem, bool) { } // Cap returns the capacity (without allocations) -func (q *cacheQueue) Cap() int { +func (q *publicationQueue) Cap() int { q.mu.RLock() c := cap(q.nodes) q.mu.RUnlock() @@ -453,7 +455,7 @@ func (q *cacheQueue) Cap() int { } // Len returns the current length of the queue. 
-func (q *cacheQueue) Len() int { +func (q *publicationQueue) Len() int { q.mu.RLock() l := q.cnt q.mu.RUnlock() @@ -461,7 +463,7 @@ func (q *cacheQueue) Len() int { } // Size returns the current size of the queue. -func (q *cacheQueue) Size() int { +func (q *publicationQueue) Size() int { q.mu.RLock() s := q.size q.mu.RUnlock() From 5b171fb7b44fd65354489e7ac87b30f0071a8ec6 Mon Sep 17 00:00:00 2001 From: FZambia Date: Sun, 5 May 2024 17:48:34 +0300 Subject: [PATCH 34/61] rename to medium --- _examples/chat_json/main.go | 4 +- _examples/compression_playground/main.go | 4 +- channel_layer.go => channel_medium.go | 70 +++++++++---------- ...el_layer_test.go => channel_medium_test.go | 48 ++++++------- config.go | 8 ++- node.go | 44 ++++++------ 6 files changed, 90 insertions(+), 88 deletions(-) rename channel_layer.go => channel_medium.go (81%) rename channel_layer_test.go => channel_medium_test.go (73%) diff --git a/_examples/chat_json/main.go b/_examples/chat_json/main.go index bd0f1d7a..cd0c4175 100644 --- a/_examples/chat_json/main.go +++ b/_examples/chat_json/main.go @@ -72,8 +72,8 @@ func main() { LogHandler: handleLog, HistoryMetaTTL: 24 * time.Hour, AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, - GetChannelLayerOptions: func(channel string) (centrifuge.ChannelLayerOptions, bool) { - return centrifuge.ChannelLayerOptions{ + GetChannelMediumOptions: func(channel string) (centrifuge.ChannelMediumOptions, bool) { + return centrifuge.ChannelMediumOptions{ KeepLatestPublication: true, EnableQueue: true, BroadcastDelay: time.Second, diff --git a/_examples/compression_playground/main.go b/_examples/compression_playground/main.go index e684930e..838fe594 100644 --- a/_examples/compression_playground/main.go +++ b/_examples/compression_playground/main.go @@ -141,8 +141,8 @@ func main() { log.Println(entry.Message, entry.Fields) }, AllowedDeltaTypes: []centrifuge.DeltaType{centrifuge.DeltaTypeFossil}, - //GetChannelLayerOptions: func(channel 
string) (centrifuge.ChannelLayerOptions, bool) { - // return centrifuge.ChannelLayerOptions{ + //GetChannelLayerOptions: func(channel string) (centrifuge.ChannelMediumOptions, bool) { + // return centrifuge.ChannelMediumOptions{ // //KeepLatestPublication: true, // //EnableQueue: true, // //BroadcastDelay: 500 * time.Millisecond, diff --git a/channel_layer.go b/channel_medium.go similarity index 81% rename from channel_layer.go rename to channel_medium.go index b807d413..5cf492e6 100644 --- a/channel_layer.go +++ b/channel_medium.go @@ -9,14 +9,14 @@ import ( "github.com/centrifugal/centrifuge/internal/timers" ) -// ChannelLayerOptions is an EXPERIMENTAL way to enable using a channel layer in Centrifuge. -// Note, channel layer is very unstable at the moment – do not use it in production! -// Channel layer is an optional per-channel intermediary between Broker PUB/SUB and Client connections. -// This intermediary layer may be used for various per-channel tweaks and optimizations. Channel layer -// comes with memory overhead depending on ChannelLayerOptions, and may consume one additional goroutine -// per channel if ChannelLayerOptions.EnableQueue is used. At the same time it can provide significant -// benefits in terms of overall system efficiency and flexibility. -type ChannelLayerOptions struct { +// ChannelMediumOptions is an EXPERIMENTAL way to enable using a channel medium layer in Centrifuge. +// Note, channel medium layer is very unstable at the moment – do not use it in production! +// Channel medium layer is an optional per-channel intermediary between Broker PUB/SUB and Client +// connections. This intermediary layer may be used for various per-channel tweaks and optimizations. +// Channel medium comes with memory overhead depending on ChannelMediumOptions, and may consume one +// additional goroutine per channel if ChannelMediumOptions.EnableQueue is used. 
At the same time it +// can provide significant benefits in terms of overall system efficiency and flexibility. +type ChannelMediumOptions struct { // EnableQueue for incoming publications. This can be useful to reduce PUB/SUB message processing time // (as we put it into a single cache layer queue instead of each individual connection queue), reduce // channel broadcast contention (when one channel waits for broadcast of another channel to finish), @@ -35,28 +35,28 @@ type ChannelLayerOptions struct { // used in channels with positioning/recovery on. BroadcastDelay time.Duration - // KeepLatestPublication enables keeping latest publication in channel cache layer. This is required + // KeepLatestPublication enables keeping latest publication in channel medium layer. This is required // for supporting deltas when BroadcastDelay > 0. // Probably it may be used for fast recovery also, but need to consider edge cases for races. KeepLatestPublication bool - // EnablePositionSync when true delegates connection position checks to the channel cache. In that case check - // is only performed no more often than once in Config.ClientChannelPositionCheckDelay thus reducing the load - // on broker in cases when channel has many subscribers. When message loss is detected cache layer tells caller - // about this and also marks all channel subscribers with insufficient state flag. By default, cache is not used - // for sync – in that case each individual connection syncs position independently. + // EnablePositionSync when true delegates connection position checks to the channel cache. In that case + // check is only performed no more often than once in Config.ClientChannelPositionCheckDelay thus reducing + // the load on broker in cases when channel has many subscribers. When message loss is detected medium layer + // tells caller about this and also marks all channel subscribers with insufficient state flag. 
By default, + // cache is not used for sync – in that case each individual connection syncs position independently. EnablePositionSync bool } // Keep global to save 8 byte per-channel. Must be only changed by tests. -var channelLayerTimeNow = time.Now +var channelMediumTimeNow = time.Now -// channelLayer is initialized when first subscriber comes into channel, and dropped as soon as last +// channelMedium is initialized when first subscriber comes into channel, and dropped as soon as last // subscriber leaves the channel on the Node. -type channelLayer struct { +type channelMedium struct { channel string node node - options ChannelLayerOptions + options ChannelMediumOptions mu sync.RWMutex closeCh chan struct{} @@ -80,17 +80,17 @@ type node interface { streamTopLatestPub(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) } -func newChannelInterlayer( +func newChannelMedium( channel string, node node, - options ChannelLayerOptions, -) (*channelLayer, error) { - c := &channelLayer{ + options ChannelMediumOptions, +) (*channelMedium, error) { + c := &channelMedium{ channel: channel, node: node, options: options, closeCh: make(chan struct{}), - positionCheckTime: channelLayerTimeNow().UnixNano(), + positionCheckTime: channelMediumTimeNow().UnixNano(), } if options.EnableQueue { c.messages = newPublicationQueue(2) @@ -114,11 +114,11 @@ type queuedPub struct { const defaultChannelLayerQueueMaxSize = 16 * 1024 * 1024 -func (c *channelLayer) broadcastPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { +func (c *channelMedium) broadcastPublication(pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) { bp := queuedPub{pub: pub, sp: sp, delta: delta, prevPub: prevPub} c.mu.Lock() c.latestQueuedStreamPosition = sp - c.positionCheckTime = channelLayerTimeNow().UnixNano() + c.positionCheckTime = channelMediumTimeNow().UnixNano() c.mu.Unlock() if c.options.EnableQueue { @@ -137,11 +137,11 @@ func (c 
*channelLayer) broadcastPublication(pub *Publication, sp StreamPosition, } } -func (c *channelLayer) broadcastInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { +func (c *channelMedium) broadcastInsufficientState(currentStreamTop StreamPosition, latestPublication *Publication) { bp := queuedPub{pub: latestPublication, sp: currentStreamTop, delta: false, isInsufficientState: true, prevPub: nil} c.mu.Lock() c.latestQueuedStreamPosition = currentStreamTop - c.positionCheckTime = channelLayerTimeNow().UnixNano() + c.positionCheckTime = channelMediumTimeNow().UnixNano() c.mu.Unlock() if c.options.EnableQueue { // TODO: possibly support c.messages.dropQueued() for this path ? @@ -153,7 +153,7 @@ func (c *channelLayer) broadcastInsufficientState(currentStreamTop StreamPositio } } -func (c *channelLayer) broadcast(qp queuedPub) { +func (c *channelMedium) broadcast(qp queuedPub) { pubToBroadcast := qp.pub spToBroadcast := qp.sp if qp.isInsufficientState { @@ -187,7 +187,7 @@ func (c *channelLayer) broadcast(qp queuedPub) { } } -func (c *channelLayer) writer() { +func (c *channelMedium) writer() { for { if ok := c.waitSendPub(c.options.BroadcastDelay); !ok { return @@ -195,7 +195,7 @@ func (c *channelLayer) writer() { } } -func (c *channelLayer) waitSendPub(delay time.Duration) bool { +func (c *channelMedium) waitSendPub(delay time.Duration) bool { // Wait for message from the queue. 
ok := c.messages.Wait() if !ok { @@ -240,8 +240,8 @@ func (c *channelLayer) waitSendPub(delay time.Duration) bool { return true } -func (c *channelLayer) CheckPosition(historyMetaTTL time.Duration, clientPosition StreamPosition, checkDelay time.Duration) bool { - nowUnixNano := channelLayerTimeNow().UnixNano() +func (c *channelMedium) CheckPosition(historyMetaTTL time.Duration, clientPosition StreamPosition, checkDelay time.Duration) bool { + nowUnixNano := channelMediumTimeNow().UnixNano() c.mu.Lock() needCheckPosition := nowUnixNano-c.positionCheckTime >= checkDelay.Nanoseconds() if needCheckPosition { @@ -262,7 +262,7 @@ func (c *channelLayer) CheckPosition(historyMetaTTL time.Duration, clientPositio return validPosition } -func (c *channelLayer) checkPositionWithRetry(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool, error) { +func (c *channelMedium) checkPositionWithRetry(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool, error) { latestPub, sp, validPosition, err := c.checkPositionOnce(historyMetaTTL, clientPosition) if err != nil || !validPosition { return c.checkPositionOnce(historyMetaTTL, clientPosition) @@ -270,7 +270,7 @@ func (c *channelLayer) checkPositionWithRetry(historyMetaTTL time.Duration, clie return latestPub, sp, validPosition, err } -func (c *channelLayer) checkPositionOnce(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool, error) { +func (c *channelMedium) checkPositionOnce(historyMetaTTL time.Duration, clientPosition StreamPosition) (*Publication, StreamPosition, bool, error) { latestPublication, streamTop, err := c.node.streamTopLatestPub(c.channel, historyMetaTTL) if err != nil { return nil, StreamPosition{}, false, err @@ -285,7 +285,7 @@ func (c *channelLayer) checkPositionOnce(historyMetaTTL time.Duration, clientPos return latestPublication, streamTop, isValidPosition, nil } -func (c 
*channelLayer) close() { +func (c *channelMedium) close() { close(c.closeCh) } diff --git a/channel_layer_test.go b/channel_medium_test.go similarity index 73% rename from channel_layer_test.go rename to channel_medium_test.go index e9674a2a..a7b8c062 100644 --- a/channel_layer_test.go +++ b/channel_medium_test.go @@ -11,11 +11,11 @@ import ( "github.com/stretchr/testify/require" ) -// Helper function to create a channelLayer with options. -func setupChannelLayer(t testing.TB, options ChannelLayerOptions, node node) *channelLayer { +// Helper function to create a channelMedium with options. +func setupChannelMedium(t testing.TB, options ChannelMediumOptions, node node) *channelMedium { t.Helper() channel := "testChannel" - cache, err := newChannelInterlayer(channel, node, options) + cache, err := newChannelMedium(channel, node, options) if err != nil { require.NoError(t, err) } @@ -42,8 +42,8 @@ func (m *mockNode) streamTopLatestPub(ch string, historyMetaTTL time.Duration) ( return nil, StreamPosition{}, nil } -func TestChannelLayerHandlePublication(t *testing.T) { - optionSet := []ChannelLayerOptions{ +func TestChannelMediumHandlePublication(t *testing.T) { + optionSet := []ChannelMediumOptions{ { EnableQueue: false, KeepLatestPublication: false, @@ -68,7 +68,7 @@ func TestChannelLayerHandlePublication(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) { doneCh := make(chan struct{}) - cache := setupChannelLayer(t, options, &mockNode{ + cache := setupChannelMedium(t, options, &mockNode{ handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error { close(doneCh) return nil @@ -89,13 +89,13 @@ func TestChannelLayerHandlePublication(t *testing.T) { } } -func TestChannelLayerInsufficientState(t *testing.T) { - options := ChannelLayerOptions{ +func TestChannelMediumInsufficientState(t *testing.T) { + options := ChannelMediumOptions{ EnableQueue: true, KeepLatestPublication: true, } doneCh := 
make(chan struct{}) - cache := setupChannelLayer(t, options, &mockNode{ + medium := setupChannelMedium(t, options, &mockNode{ handlePublicationFunc: func(channel string, pub *Publication, sp StreamPosition, delta bool, prevPublication *Publication) error { require.Equal(t, uint64(math.MaxUint64), pub.Offset) require.Equal(t, uint64(math.MaxUint64), sp.Offset) @@ -105,7 +105,7 @@ func TestChannelLayerInsufficientState(t *testing.T) { }) // Simulate the behavior when the state is marked as insufficient - cache.broadcastInsufficientState(StreamPosition{Offset: 2}, &Publication{}) + medium.broadcastInsufficientState(StreamPosition{Offset: 2}, &Publication{}) select { case <-doneCh: @@ -114,13 +114,13 @@ func TestChannelLayerInsufficientState(t *testing.T) { } } -func TestChannelLayerPositionSync(t *testing.T) { - options := ChannelLayerOptions{ +func TestChannelMediumPositionSync(t *testing.T) { + options := ChannelMediumOptions{ EnablePositionSync: true, } doneCh := make(chan struct{}) var closeOnce sync.Once - layer := setupChannelLayer(t, options, &mockNode{ + medium := setupChannelMedium(t, options, &mockNode{ streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { closeOnce.Do(func() { close(doneCh) @@ -128,12 +128,12 @@ func TestChannelLayerPositionSync(t *testing.T) { return nil, StreamPosition{}, nil }, }) - originalGetter := channelLayerTimeNow - channelLayerTimeNow = func() time.Time { + originalGetter := channelMediumTimeNow + channelMediumTimeNow = func() time.Time { return time.Now().Add(time.Hour) } - layer.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second) - channelLayerTimeNow = originalGetter + medium.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second) + channelMediumTimeNow = originalGetter select { case <-doneCh: case <-time.After(5 * time.Second): @@ -141,14 +141,14 @@ func TestChannelLayerPositionSync(t *testing.T) { } } -func 
TestChannelLayerPositionSyncRetry(t *testing.T) { - options := ChannelLayerOptions{ +func TestChannelMediumPositionSyncRetry(t *testing.T) { + options := ChannelMediumOptions{ EnablePositionSync: true, } doneCh := make(chan struct{}) var closeOnce sync.Once numCalls := 0 - layer := setupChannelLayer(t, options, &mockNode{ + medium := setupChannelMedium(t, options, &mockNode{ streamTopLatestPubFunc: func(ch string, historyMetaTTL time.Duration) (*Publication, StreamPosition, error) { if numCalls == 0 { numCalls++ @@ -160,12 +160,12 @@ func TestChannelLayerPositionSyncRetry(t *testing.T) { return nil, StreamPosition{}, nil }, }) - originalGetter := channelLayerTimeNow - channelLayerTimeNow = func() time.Time { + originalGetter := channelMediumTimeNow + channelMediumTimeNow = func() time.Time { return time.Now().Add(time.Hour) } - layer.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second) - channelLayerTimeNow = originalGetter + medium.CheckPosition(time.Second, StreamPosition{Offset: 1, Epoch: "test"}, time.Second) + channelMediumTimeNow = originalGetter select { case <-doneCh: case <-time.After(5 * time.Second): diff --git a/config.go b/config.go index ae252980..df0fde5d 100644 --- a/config.go +++ b/config.go @@ -115,9 +115,11 @@ type Config struct { // Delta encoding is an EXPERIMENTAL feature and may be changed. AllowedDeltaTypes []DeltaType - // GetChannelLayerOptions is a way to provide ChannelLayerOptions for channel. - // See the doc comment for ChannelLayerOptions. - GetChannelLayerOptions func(channel string) (ChannelLayerOptions, bool) + // GetChannelMediumOptions is a way to provide ChannelMediumOptions for specific channel. + // This function is called each time new channel appears on the Node. If it returns false + // then no medium layer will be used for the channel. + // See the doc comment for ChannelMediumOptions for more details about channel medium concept. 
+ GetChannelMediumOptions func(channel string) (ChannelMediumOptions, bool) } const ( diff --git a/node.go b/node.go index a82850a7..c538fc19 100644 --- a/node.go +++ b/node.go @@ -84,7 +84,7 @@ type Node struct { emulationSurveyHandler *emulationSurveyHandler - layers map[string]*channelLayer + mediums map[string]*channelMedium } const ( @@ -164,7 +164,7 @@ func New(c Config) (*Node, error) { subDissolver: dissolve.New(numSubDissolverWorkers), nowTimeGetter: nowtime.Get, surveyRegistry: make(map[uint64]chan survey), - layers: map[string]*channelLayer{}, + mediums: map[string]*channelMedium{}, } n.emulationSurveyHandler = newEmulationSurveyHandler(n) @@ -683,13 +683,13 @@ func (n *Node) handleControl(data []byte) error { return nil } -func (n *Node) handlePublicationCached(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { +func (n *Node) handlePublicationViaMedium(ch string, pub *Publication, sp StreamPosition, delta bool, prevPub *Publication) error { mu := n.subLock(ch) mu.Lock() - cache, ok := n.layers[ch] + medium, ok := n.mediums[ch] mu.Unlock() if ok { - cache.broadcastPublication(pub, sp, delta, prevPub) + medium.broadcastPublication(pub, sp, delta, prevPub) return nil } return n.handlePublication(ch, pub, sp, delta, prevPub) @@ -1003,25 +1003,25 @@ func (n *Node) addSubscription(ch string, sub subInfo) error { return err } if first { - if n.config.GetChannelLayerOptions != nil { - cacheOpts, ok := n.config.GetChannelLayerOptions(ch) + if n.config.GetChannelMediumOptions != nil { + mediumOptions, ok := n.config.GetChannelMediumOptions(ch) if ok { - layer, err := newChannelInterlayer(ch, n, cacheOpts) + medium, err := newChannelMedium(ch, n, mediumOptions) if err != nil { return err } - n.layers[ch] = layer + n.mediums[ch] = medium } } err := n.broker.Subscribe(ch) if err != nil { _, _ = n.hub.removeSub(ch, sub.client) - if n.config.GetChannelLayerOptions != nil { - layer, ok := n.layers[ch] + if 
n.config.GetChannelMediumOptions != nil { + medium, ok := n.mediums[ch] if ok { - layer.close() - delete(n.layers, ch) + medium.close() + delete(n.mediums, ch) } } return err @@ -1058,10 +1058,10 @@ func (n *Node) removeSubscription(ch string, c *Client) error { // Cool down a bit since broker is not ready to process unsubscription. time.Sleep(500 * time.Millisecond) } else { - cache, ok := n.layers[ch] + medium, ok := n.mediums[ch] if ok { - cache.close() - delete(n.layers, ch) + medium.close() + delete(n.mediums, ch) } } return err @@ -1448,10 +1448,10 @@ func (n *Node) checkPosition(ch string, position StreamPosition, historyMetaTTL n.metrics.incActionCount("add_subscription") mu := n.subLock(ch) mu.Lock() - cache, ok := n.layers[ch] + medium, ok := n.mediums[ch] mu.Unlock() - if !ok || !cache.options.EnablePositionSync { - // No interlayer for channel or position sync disabled – we then check position over Broker. + if !ok || !medium.options.EnablePositionSync { + // No medium for channel or position sync disabled – we then check position over Broker. streamTop, err := n.streamTop(ch, historyMetaTTL) if err != nil { // Will be checked later. 
@@ -1459,7 +1459,7 @@ func (n *Node) checkPosition(ch string, position StreamPosition, historyMetaTTL } return streamTop.Epoch == position.Epoch && position.Offset == streamTop.Offset, nil } - validPosition := cache.CheckPosition(historyMetaTTL, position, n.config.ClientChannelPositionCheckDelay) + validPosition := medium.CheckPosition(historyMetaTTL, position, n.config.ClientChannelPositionCheckDelay) return validPosition, nil } @@ -1646,8 +1646,8 @@ func (h *brokerEventHandler) HandlePublication(ch string, pub *Publication, sp S if pub == nil { panic("nil Publication received, this must never happen") } - if h.node.config.GetChannelLayerOptions != nil { - return h.node.handlePublicationCached(ch, pub, sp, delta, prevPub) + if h.node.config.GetChannelMediumOptions != nil { + return h.node.handlePublicationViaMedium(ch, pub, sp, delta, prevPub) } return h.node.handlePublication(ch, pub, sp, delta, prevPub) } From b7c82a869463db8adb9bf5b01aabbe5bef443845 Mon Sep 17 00:00:00 2001 From: FZambia Date: Mon, 6 May 2024 13:45:25 +0300 Subject: [PATCH 35/61] test fix --- client_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/client_test.go b/client_test.go index ee54cf6c..3cc3a70c 100644 --- a/client_test.go +++ b/client_test.go @@ -664,7 +664,11 @@ func TestClientSubscribeDeltaNotAllowed(t *testing.T) { Channel: "test_channel", Delta: string(DeltaTypeFossil), }, &protocol.Command{Id: 1}, time.Now(), rwWrapper.rw) - require.Equal(t, DisconnectBadRequest, err) + require.NoError(t, err) + require.Equal(t, 1, len(rwWrapper.replies)) + require.Nil(t, rwWrapper.replies[0].Error) + res := extractSubscribeResult(rwWrapper.replies) + require.False(t, res.Delta) } func TestClientSubscribeUnknownDelta(t *testing.T) { From 76f63761be2919e36046e7c2719abeb51e42666e Mon Sep 17 00:00:00 2001 From: FZambia Date: Tue, 7 May 2024 14:56:07 +0300 Subject: [PATCH 36/61] change js protobuf port --- _examples/compression_playground/templates/protobuf.html | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/_examples/compression_playground/templates/protobuf.html b/_examples/compression_playground/templates/protobuf.html index f0e88c6c..fcfc1a61 100644 --- a/_examples/compression_playground/templates/protobuf.html +++ b/_examples/compression_playground/templates/protobuf.html @@ -5,7 +5,7 @@ - + + - + - + + + Speedometer Visualization + + + +
    + + + diff --git a/_examples/recovery_mode_cache/main.go b/_examples/recovery_mode_cache/main.go new file mode 100644 index 00000000..512658b2 --- /dev/null +++ b/_examples/recovery_mode_cache/main.go @@ -0,0 +1,166 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "strconv" + "syscall" + "time" + + _ "net/http/pprof" + + "github.com/centrifugal/centrifuge" +) + +var port = flag.Int("port", 8000, "Port to bind app to") + +func handleLog(e centrifuge.LogEntry) { + log.Printf("%s: %v", e.Message, e.Fields) +} + +func authMiddleware(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + newCtx := centrifuge.SetCredentials(ctx, ¢rifuge.Credentials{ + UserID: "", + }) + r = r.WithContext(newCtx) + h.ServeHTTP(w, r) + }) +} + +func waitExitSignal(n *centrifuge.Node, s *http.Server) { + sigCh := make(chan os.Signal, 1) + done := make(chan bool, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sigCh + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = n.Shutdown(ctx) + _ = s.Shutdown(ctx) + done <- true + }() + <-done +} + +const exampleChannel = "speed" + +// Check whether channel is allowed for subscribing. In real case permission +// check will probably be more complex than in this example. 
+func channelSubscribeAllowed(channel string) bool { + return channel == exampleChannel +} + +func main() { + node, _ := centrifuge.New(centrifuge.Config{ + LogLevel: centrifuge.LogLevelInfo, + LogHandler: handleLog, + HistoryMetaTTL: 24 * time.Hour, + }) + + node.OnConnect(func(client *centrifuge.Client) { + transport := client.Transport() + log.Printf("[user %s] connected via %s with protocol: %s", client.UserID(), transport.Name(), transport.Protocol()) + + client.OnSubscribe(func(e centrifuge.SubscribeEvent, cb centrifuge.SubscribeCallback) { + log.Printf("[user %s] subscribes on %s", client.UserID(), e.Channel) + + if !channelSubscribeAllowed(e.Channel) { + cb(centrifuge.SubscribeReply{}, centrifuge.ErrorPermissionDenied) + return + } + + cb(centrifuge.SubscribeReply{ + Options: centrifuge.SubscribeOptions{ + EnableRecovery: true, + RecoveryMode: centrifuge.RecoveryModeCache, + }, + }, nil) + }) + + client.OnUnsubscribe(func(e centrifuge.UnsubscribeEvent) { + log.Printf("[user %s] unsubscribed from %s: %s", client.UserID(), e.Channel, e.Reason) + }) + + client.OnDisconnect(func(e centrifuge.DisconnectEvent) { + log.Printf("[user %s] disconnected: %s", client.UserID(), e.Reason) + }) + }) + + if err := node.Run(); err != nil { + log.Fatal(err) + } + + go func() { + const ( + accelerationRate = 2.0 // Speed increment per 100 ms + brakingRate = 10.0 // Speed decrement per 100 ms + maxSpeed = 190.0 + minSpeed = 50.0 + ) + + speed := 0.0 + increasing := true + + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if increasing { + speed += accelerationRate + if speed >= maxSpeed { + increasing = false + } + } else { + speed -= brakingRate + if speed <= minSpeed { + increasing = true + } + } + _, err := node.Publish( + exampleChannel, + []byte(`{"speed": `+fmt.Sprint(speed)+`}`), + centrifuge.WithHistory(1, time.Minute), + ) + if err != nil { + log.Printf("error publishing to personal channel: %s", err) + 
} + } + } + }() + + mux := http.DefaultServeMux + + websocketHandler := centrifuge.NewWebsocketHandler(node, centrifuge.WebsocketConfig{ + ReadBufferSize: 1024, + UseWriteBufferPool: true, + }) + mux.Handle("/connection/websocket", authMiddleware(websocketHandler)) + mux.Handle("/", http.FileServer(http.Dir("./"))) + + server := &http.Server{ + Handler: mux, + Addr: "127.0.0.1:" + strconv.Itoa(*port), + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + } + + log.Print("Starting server, visit http://localhost:8000") + go func() { + if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Fatal(err) + } + }() + + waitExitSignal(node, server) + log.Println("bye!") +} From 4e575c444e1b6953c8fe6db73e56ce85ba812f53 Mon Sep 17 00:00:00 2001 From: FZambia Date: Wed, 15 May 2024 22:03:20 +0300 Subject: [PATCH 61/61] example readmr --- _examples/recovery_mode_cache/readme.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 _examples/recovery_mode_cache/readme.md diff --git a/_examples/recovery_mode_cache/readme.md b/_examples/recovery_mode_cache/readme.md new file mode 100644 index 00000000..3f2912df --- /dev/null +++ b/_examples/recovery_mode_cache/readme.md @@ -0,0 +1,7 @@ +Demonstration on using cache recovery mode. + +``` +go run main.go +``` + +Then go to http://localhost:8000