Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

S2 perf improvements #563

Merged
merged 8 commits into from
Sep 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ require (
github.com/llgcode/draw2d v0.0.0-20230723155556-e595d7c7e75e
github.com/markus-wa/go-heatmap/v2 v2.0.0
github.com/markus-wa/go-unassert v0.1.3
github.com/markus-wa/gobitread v0.2.3
github.com/markus-wa/gobitread v0.2.4
github.com/markus-wa/godispatch v1.4.1
github.com/markus-wa/ice-cipher-go v0.0.0-20230901094113-348096939ba7
github.com/markus-wa/quickhull-go/v2 v2.2.0
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ github.com/markus-wa/go-unassert v0.1.3 h1:4N2fPLUS3929Rmkv94jbWskjsLiyNT2yQpCul
github.com/markus-wa/go-unassert v0.1.3/go.mod h1:/pqt7a0LRmdsRNYQ2nU3SGrXfw3bLXrvIkakY/6jpPY=
github.com/markus-wa/gobitread v0.2.3 h1:COx7dtYQ7Q+77hgUmD+O4MvOcqG7y17RP3Z7BbjRvPs=
github.com/markus-wa/gobitread v0.2.3/go.mod h1:PcWXMH4gx7o2CKslbkFkLyJB/aHW7JVRG3MRZe3PINg=
github.com/markus-wa/gobitread v0.2.4 h1:BDr3dZnsqntDD4D8E7DzhkQlASIkQdfxCXLhWcI2K5A=
github.com/markus-wa/gobitread v0.2.4/go.mod h1:PcWXMH4gx7o2CKslbkFkLyJB/aHW7JVRG3MRZe3PINg=
github.com/markus-wa/godispatch v1.4.1 h1:Cdff5x33ShuX3sDmUbYWejk7tOuoHErFYMhUc2h7sLc=
github.com/markus-wa/godispatch v1.4.1/go.mod h1:tk8L0yzLO4oAcFwM2sABMge0HRDJMdE8E7xm4gK/+xM=
github.com/markus-wa/ice-cipher-go v0.0.0-20230901094113-348096939ba7 h1:aR9pvnlnBxifXBmzidpAiq2prLSGlkhE904qnk2sCz4=
Expand Down
9 changes: 6 additions & 3 deletions pkg/demoinfocs/common/player.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ type Player struct {
demoInfoProvider demoInfoProvider // provider for demo info such as tick-rate or current tick

SteamID64 uint64 // 64-bit representation of the user's Steam ID. See https://developer.valvesoftware.com/wiki/SteamID
LastAlivePosition r3.Vector // The location where the player was last alive. Should be equal to Position if the player is still alive.
LastAlivePosition r3.Vector // Deprecated: will be removed in v5 due to performance concerns, track this yourself.
UserID int // Mostly used in game-events to address this player
Name string // Steam / in-game user name
Inventory map[int]*Equipment // All weapons / equipment the player is currently carrying. See also Weapons().
Expand All @@ -33,7 +33,7 @@ type Player struct {
IsPlanting bool
IsReloading bool
IsUnknown bool // Used to identify unknown/broken players. see https://github.com/markus-wa/demoinfocs-golang/issues/162
PreviousFramePosition r3.Vector // CS2 only, used to compute velocity as it's not networked in CS2 demos
PreviousFramePosition r3.Vector // Deprecated: may be removed in v5 due to performance concerns, track this yourself.
}

func (p *Player) PlayerPawnEntity() st.Entity {
Expand Down Expand Up @@ -85,9 +85,11 @@ func (p *Player) IsAlive() bool {
}

if p.demoInfoProvider.IsSource2() {
if pawnEntity := p.PlayerPawnEntity(); pawnEntity != nil {
pawnEntity := p.PlayerPawnEntity()
if pawnEntity != nil {
return pawnEntity.PropertyValueMust("m_lifeState").S2UInt64() == 0
}

return getBool(p.Entity, "m_bPawnIsAlive")
}

Expand Down Expand Up @@ -535,6 +537,7 @@ func (p *Player) PositionEyes() r3.Vector {
}

// Velocity returns the player's velocity.
// Deprecated: will be removed due to performance concerns, you will need to track this yourself.
func (p *Player) Velocity() r3.Vector {
if p.demoInfoProvider.IsSource2() {
t := 64.0
Expand Down
4 changes: 3 additions & 1 deletion pkg/demoinfocs/datatables.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package demoinfocs
import (
"fmt"
"math"
"os"
"strings"

"github.com/golang/geo/r3"
Expand Down Expand Up @@ -637,6 +638,7 @@ func (p *parser) bindNewPlayerPawnS2(pawnEntity st.Entity) {
if pl == nil {
return
}

if pl.IsAlive() {
pl.LastAlivePosition = pos
}
Expand Down Expand Up @@ -919,7 +921,7 @@ func (p *parser) bindGrenadeProjectiles(entity st.Entity) {
if exists {
wep = weaponType
} else {
fmt.Printf("unknown grenade model %d\n", model)
fmt.Fprintf(os.Stderr, "unknown grenade model %d\n", model)
}
}
}
Expand Down
8 changes: 4 additions & 4 deletions pkg/demoinfocs/demoinfocs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -493,7 +493,7 @@ func TestConcurrent(t *testing.T) {
func parseDefaultDemo(tb testing.TB) {
tb.Helper()

f := openFile(tb, defaultDemPath)
f := openFile(tb, s2DemPath)
defer mustClose(tb, f)

p := demoinfocs.NewParser(f)
Expand Down Expand Up @@ -599,15 +599,15 @@ func BenchmarkDemoInfoCs(b *testing.B) {
}

func BenchmarkInMemory(b *testing.B) {
f := openFile(b, defaultDemPath)
f := openFile(b, s2DemPath)
defer mustClose(b, f)

inf, err := f.Stat()
assert.NoError(b, err, "failed to stat file %q", defaultDemPath)
assert.NoError(b, err, "failed to stat file %q", s2DemPath)

d := make([]byte, inf.Size())
n, err := f.Read(d)
assert.NoError(b, err, "failed to read file %q", defaultDemPath)
assert.NoError(b, err, "failed to read file %q", s2DemPath)
assert.Equal(b, int64(n), inf.Size(), "byte count not as expected")

b.ResetTimer()
Expand Down
1 change: 1 addition & 0 deletions pkg/demoinfocs/parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ type parser struct {
equipmentTypePerModel map[uint64]common.EquipmentType // Used to retrieve the EquipmentType of grenade projectiles based on models value. Source 2 only.
stringTables []createStringTable // Contains all created sendtables, needed when updating them
delayedEventHandlers []func() // Contains event handlers that need to be executed at the end of a tick (e.g. flash events because FlashDuration isn't updated before that)
pendingMessagesCache []pendingMessage // Cache for pending messages that need to be dispatched after the current tick
}

// NetMessageCreator creates additional net-messages to be dispatched to net-message handlers.
Expand Down
10 changes: 5 additions & 5 deletions pkg/demoinfocs/s2_commands.go
Original file line number Diff line number Diff line change
Expand Up @@ -310,21 +310,21 @@ func (p *parser) handleDemoPacket(pack *msgs2.CDemoPacket) {

r := bitread.NewSmallBitReader(bytes.NewReader(b))

ms := make([]pendingMessage, 0)
p.pendingMessagesCache = p.pendingMessagesCache[:0]

for len(b)*8-r.ActualPosition() > 7 {
t := int32(r.ReadUBitInt())
size := r.ReadVarInt32()
buf := r.ReadBytes(int(size))

ms = append(ms, pendingMessage{t, buf})
p.pendingMessagesCache = append(p.pendingMessagesCache, pendingMessage{t, buf})
}

sort.SliceStable(ms, func(i, j int) bool {
return ms[i].priority() < ms[j].priority() // TODO: taken from dotabuff/manta. do we really need this?
sort.SliceStable(p.pendingMessagesCache, func(i, j int) bool {
return p.pendingMessagesCache[i].priority() < p.pendingMessagesCache[j].priority()
})

for _, m := range ms {
for _, m := range p.pendingMessagesCache {
var msgCreator NetMessageCreator

if m.t < int32(msgs2.SVC_Messages_svc_ServerInfo) {
Expand Down
29 changes: 8 additions & 21 deletions pkg/demoinfocs/sendtables2/entity.go
Original file line number Diff line number Diff line change
Expand Up @@ -424,9 +424,9 @@ func (p *Parser) FilterEntity(fb func(*Entity) bool) []*Entity {
}

func (e *Entity) readFields(r *reader, paths *[]*fieldPath) {
readFieldPaths(r, paths)
n := readFieldPaths(r, paths)

for _, fp := range *paths {
for _, fp := range (*paths)[:n] {
f := e.class.serializer.getFieldForFieldPath(fp, 0)
name := e.class.getNameForFieldPath(fp)
decoder, base := e.class.serializer.getDecoderForFieldPath2(fp, 0)
Expand Down Expand Up @@ -462,8 +462,6 @@ func (e *Entity) readFields(r *reader, paths *[]*fieldPath) {
S2: true,
})
}

fp.release()
}
}

Expand All @@ -486,15 +484,7 @@ func (p *Parser) OnPacketEntities(m *msgs2.CSVCMsg_PacketEntities) error {
p.entityFullPackets++
}

type tuple struct {
ent *Entity
op st.EntityOp
}

var (
tuples []tuple
paths = make([]*fieldPath, 0)
)
p.tuplesCache = p.tuplesCache[:0]

for ; updates > 0; updates-- {
var (
Expand Down Expand Up @@ -530,12 +520,10 @@ func (p *Parser) OnPacketEntities(m *msgs2.CSVCMsg_PacketEntities) error {

if baseline != nil {
// POV demos are missing some baselines?
e.readFields(newReader(baseline), &paths)
paths = paths[:0]
e.readFields(newReader(baseline), &p.pathCache)
}

e.readFields(r, &paths)
paths = paths[:0]
e.readFields(r, &p.pathCache)

// Fire created-handlers so update-handlers can be registered
for _, h := range class.createdHandlers {
Expand All @@ -559,8 +547,7 @@ func (p *Parser) OnPacketEntities(m *msgs2.CSVCMsg_PacketEntities) error {
op |= st.EntityOpEntered
}

e.readFields(r, &paths)
paths = paths[:0]
e.readFields(r, &p.pathCache)
}
} else {
e = p.entities[index]
Expand All @@ -583,10 +570,10 @@ func (p *Parser) OnPacketEntities(m *msgs2.CSVCMsg_PacketEntities) error {
}
}

tuples = append(tuples, tuple{e, op})
p.tuplesCache = append(p.tuplesCache, tuple{e, op})
}

for _, t := range tuples {
for _, t := range p.tuplesCache {
e := t.ent

for _, h := range p.entityHandlers {
Expand Down
5 changes: 4 additions & 1 deletion pkg/demoinfocs/sendtables2/field_decoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -260,6 +260,7 @@ func quantizedFactory(f *field) fieldDecoder {
}

qfd := newQuantizedFloatDecoder(f.bitCount, f.encodeFlags, f.lowValue, f.highValue)

return func(r *reader) interface{} {
return qfd.decode(r)
}
Expand All @@ -274,9 +275,11 @@ func vectorFactory(n int) fieldFactory {
d := floatFactory(f)
return func(r *reader) interface{} {
x := make([]float32, n)

for i := 0; i < n; i++ {
x[i] = d(r).(float32)
}

return x
}
}
Expand Down Expand Up @@ -319,7 +322,7 @@ func ammoDecoder(r *reader) interface{} {
}

func noscaleDecoder(r *reader) interface{} {
return math.Float32frombits(r.readBits(32))
return math.Float32frombits(r.readLeUint32())
}

func runeTimeDecoder(r *reader) interface{} {
Expand Down
18 changes: 15 additions & 3 deletions pkg/demoinfocs/sendtables2/field_path.go
Original file line number Diff line number Diff line change
Expand Up @@ -306,10 +306,10 @@ func (fp *fieldPath) release() {
}

// readFieldPaths reads a new slice of fieldPath values from the given reader
func readFieldPaths(r *reader, paths *[]*fieldPath) {
func readFieldPaths(r *reader, paths *[]*fieldPath) int {
fp := newFieldPath()

node := huffTree
i := 0

for !fp.done {
var next huffmanTree
Expand All @@ -326,14 +326,26 @@ func readFieldPaths(r *reader, paths *[]*fieldPath) {
fieldPathTable[next.Value()].fn(r, fp)

if !fp.done {
*paths = append(*paths, fp.copy())
if len(*paths) <= i {
*paths = append(*paths, fp.copy())
} else {
x := (*paths)[i]
x.last = fp.last
x.done = fp.done

copy(x.path, fp.path)
}

i++
}
} else {
node = next
}
}

fp.release()

return i
}

// newHuffmanTree creates a new huffmanTree from the field path table
Expand Down
7 changes: 6 additions & 1 deletion pkg/demoinfocs/sendtables2/field_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,13 @@ func (s *fieldState) get(fp *fieldPath) interface{} {
func (s *fieldState) set(fp *fieldPath, v interface{}) {
x := s
z := 0

for i := 0; i <= fp.last; i++ {
z = fp.path[i]

if y := len(x.state); y <= z {
newCap := max(z+2, y*2)
if newCap > cap(x.state) {
if z+2 > cap(x.state) {
newSlice := make([]interface{}, z+1, newCap)
copy(newSlice, x.state)
x.state = newSlice
Expand All @@ -45,15 +47,18 @@ func (s *fieldState) set(fp *fieldPath, v interface{}) {
x.state = x.state[:z+1]
}
}

if i == fp.last {
if _, ok := x.state[z].(*fieldState); !ok {
x.state[z] = v
}
return
}

if _, ok := x.state[z].(*fieldState); !ok {
x.state[z] = newFieldState()
}

x = x.state[z].(*fieldState)
}
}
Expand Down
7 changes: 7 additions & 0 deletions pkg/demoinfocs/sendtables2/parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,11 @@ var itemCounts = map[string]int{
"MAX_ABILITY_DRAFT_ABILITIES": 48,
}

// tuple pairs an Entity with the st.EntityOp that was applied to it while
// decoding a packet. Instances are collected in Parser.tuplesCache during
// OnPacketEntities so the registered entity handlers can be invoked only
// after every entity update in the packet has been read.
type tuple struct {
ent *Entity
op st.EntityOp
}

type Parser struct {
serializers map[string]*serializer
classIdSize uint32
Expand All @@ -60,6 +65,8 @@ type Parser struct {
entityFullPackets int
entities map[int32]*Entity
entityHandlers []st.EntityHandler
pathCache []*fieldPath
tuplesCache []tuple
}

func (p *Parser) ReadEnterPVS(r *bit.BitReader, index int, entities map[int]st.Entity, slot int) st.Entity {
Expand Down
21 changes: 16 additions & 5 deletions pkg/demoinfocs/sendtables2/reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,13 +39,15 @@ func (r *reader) remBytes() uint32 {

// nextByte reads the next byte from the buffer
func (r *reader) nextByte() byte {
r.pos++

if r.pos > r.size {
if r.pos >= r.size {
_panicf("nextByte: insufficient buffer (%d of %d)", r.pos, r.size)
}

return r.buf[r.pos-1]
x := r.buf[r.pos]

r.pos++

return x
}

// readBits returns the uint32 value for the given number of sequential bits
Expand Down Expand Up @@ -77,22 +79,31 @@ func (r *reader) readBytes(n uint32) []byte {
// Fast path if we're byte aligned
if r.bitCount == 0 {
r.pos += n

if r.pos > r.size {
_panicf("readBytes: insufficient buffer (%d of %d)", r.pos, r.size)
}

return r.buf[r.pos-n : r.pos]
}

buf := make([]byte, n)

for i := uint32(0); i < n; i++ {
buf[i] = byte(r.readBits(8))
}

return buf
}

// readLeUint32 reads an little-endian uint32
func (r *reader) readLeUint32() uint32 {
return binary.LittleEndian.Uint32(r.readBytes(4))
// Fast path if we're byte aligned
if r.bitCount == 0 {
return binary.LittleEndian.Uint32(r.readBytes(4))
}

return r.readBits(32)
}

// readLeUint64 reads a little-endian uint64
Expand Down
Loading