diff --git a/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice.go.tmpl b/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice.go.tmpl index c8b848a81f4..556fba151cf 100644 --- a/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice.go.tmpl +++ b/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice.go.tmpl @@ -108,3 +108,61 @@ func copy{{ .structName }}(dst, src []{{ .itemType }}) []{{ .itemType }} { dst = dst[:0] return append(dst, src...) } + +{{- if ne .itemType "string" }} +// TryIncrementFrom increments all elements from the current slice by the elements from another slice +// if it has enough capacity for the other slice's length plus the offset. +// If there isn't enough capacity, this method returns false and the slice is not mutated. +func (ms {{ .structName }}) TryIncrementFrom(other {{ .structName }}, offset int) bool { + if offset < 0 { + return false + } + ms.getState().AssertMutable() + newLen := max(ms.Len(), other.Len()+offset) + ours := *ms.getOrig() + if cap(ours) < newLen { + return false + } + ours = ours[:newLen] + theirs := *other.getOrig() + for i := 0; i < len(theirs); i++ { + ours[i+offset] += theirs[i] + } + *ms.getOrig() = ours + return true +} + +// Collapse merges (sums) n adjacent buckets and reslices to account for the decreased length +// +// n=2 offset=1 +// before: 1 1 1 1 1 1 1 1 +// V V V V V +// after: 1 2 2 2 1 +func (ms {{ .structName }}) Collapse(n, offset int) { + ms.getState().AssertMutable() + if offset >= n || offset < 0 { + panic(fmt.Sprintf("offset %d must be positive and smaller than n %d", offset, n)) + } + if n < 2 { + return + } + orig := *ms.getOrig() + newLen := (len(orig) + offset) / n + if (len(orig)+offset)%n != 0 { + newLen++ + } + + for i := 0; i < newLen; i++ { + if offset == 0 || i > 0 { + orig[i] = orig[i*n-offset] + } + for j := i*n + 1 - offset; j < i*n+n-offset && j < len(orig); j++ { + if j > 0 { + orig[i] += orig[j] + } + } + } + + *ms.getOrig() = 
orig[:newLen] +} +{{- end }} diff --git a/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice_test.go.tmpl b/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice_test.go.tmpl index 26f6038ba7c..38fd8710e30 100644 --- a/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice_test.go.tmpl +++ b/pdata/internal/cmd/pdatagen/internal/templates/primitive_slice_test.go.tmpl @@ -111,3 +111,61 @@ func Test{{ .structName }}EnsureCapacity(t *testing.T) { ms.EnsureCapacity(2) assert.Equal(t, 4, cap(*ms.getOrig())) } + +{{- if ne .itemType "string" }} + +func Test{{ .structName }}TryIncrementFrom(t *testing.T) { + ms := New{{ .structName }}() + ms.FromRaw([]{{ .itemType }}{10, 9}) + + ms2 := New{{ .structName }}() + ms2.FromRaw([]{{ .itemType }}{1, 10}) + + assert.False(t, ms.TryIncrementFrom(ms2, 1)) + ms.EnsureCapacity(4) + assert.True(t, ms.TryIncrementFrom(ms2, 1)) + {{- if eq .itemType "float64" }} + assert.InDelta(t, {{ .itemType }}(10), ms.At(0), 0.01) + assert.InDelta(t, {{ .itemType }}(10), ms.At(1), 0.01) + assert.InDelta(t, {{ .itemType }}(10), ms.At(2), 0.01) + {{- else }} + assert.Equal(t, {{ .itemType }}(10), ms.At(0)) + assert.Equal(t, {{ .itemType }}(10), ms.At(1)) + assert.Equal(t, {{ .itemType }}(10), ms.At(2)) + {{- end }} +} + +func Test{{ .structName }}Collapse(t *testing.T) { + ms := New{{ .structName }}() + ms.FromRaw([]{{ .itemType }}{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 0) + + assert.Equal(t, 2, ms.Len()) + {{- if eq .itemType "float64" }} + assert.InDelta(t, {{ .itemType }}(4), ms.At(0), 0.01) + assert.InDelta(t, {{ .itemType }}(2), ms.At(1), 0.01) + {{- else }} + assert.Equal(t, {{ .itemType }}(4), ms.At(0)) + assert.Equal(t, {{ .itemType }}(2), ms.At(1)) + {{- end }} +} + +func Test{{ .structName }}CollapseOffset(t *testing.T) { + ms := New{{ .structName }}() + ms.FromRaw([]{{ .itemType }}{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 3) + + assert.Equal(t, 3, ms.Len()) + {{- if eq .itemType "float64" }} + assert.InDelta(t, {{ 
.itemType }}(1), ms.At(0), 0.01) + assert.InDelta(t, {{ .itemType }}(4), ms.At(1), 0.01) + assert.InDelta(t, {{ .itemType }}(1), ms.At(2), 0.01) + {{- else }} + assert.Equal(t, {{ .itemType }}(1), ms.At(0)) + assert.Equal(t, {{ .itemType }}(4), ms.At(1)) + assert.Equal(t, {{ .itemType }}(1), ms.At(2)) + {{- end }} +} +{{- end }} diff --git a/pdata/pcommon/generated_byteslice.go b/pdata/pcommon/generated_byteslice.go index cbb64987d2b..fa6ee2d7ac4 100644 --- a/pdata/pcommon/generated_byteslice.go +++ b/pdata/pcommon/generated_byteslice.go @@ -7,6 +7,8 @@ package pcommon import ( + "fmt" + "go.opentelemetry.io/collector/pdata/internal" ) @@ -106,3 +108,59 @@ func copyByteSlice(dst, src []byte) []byte { dst = dst[:0] return append(dst, src...) } + +// TryIncrementFrom increments all elements from the current slice by the elements from another slice +// if it has enough capacity for the other slice's length plus the offset. +// If there isn't enough capacity, this method returns false and the slice is not mutated. 
+func (ms ByteSlice) TryIncrementFrom(other ByteSlice, offset int) bool { + if offset < 0 { + return false + } + ms.getState().AssertMutable() + newLen := max(ms.Len(), other.Len()+offset) + ours := *ms.getOrig() + if cap(ours) < newLen { + return false + } + ours = ours[:newLen] + theirs := *other.getOrig() + for i := 0; i < len(theirs); i++ { + ours[i+offset] += theirs[i] + } + *ms.getOrig() = ours + return true +} + +// Collapse merges (sums) n adjacent buckets and reslices to account for the decreased length +// +// n=2 offset=1 +// before: 1 1 1 1 1 1 1 1 +// V V V V V +// after: 1 2 2 2 1 +func (ms ByteSlice) Collapse(n, offset int) { + ms.getState().AssertMutable() + if offset >= n || offset < 0 { + panic(fmt.Sprintf("offset %d must be positive and smaller than n %d", offset, n)) + } + if n < 2 { + return + } + orig := *ms.getOrig() + newLen := (len(orig) + offset) / n + if (len(orig)+offset)%n != 0 { + newLen++ + } + + for i := 0; i < newLen; i++ { + if offset == 0 || i > 0 { + orig[i] = orig[i*n-offset] + } + for j := i*n + 1 - offset; j < i*n+n-offset && j < len(orig); j++ { + if j > 0 { + orig[i] += orig[j] + } + } + } + + *ms.getOrig() = orig[:newLen] +} diff --git a/pdata/pcommon/generated_byteslice_test.go b/pdata/pcommon/generated_byteslice_test.go index 1c0dc8219cd..b97f7d5801b 100644 --- a/pdata/pcommon/generated_byteslice_test.go +++ b/pdata/pcommon/generated_byteslice_test.go @@ -81,3 +81,41 @@ func TestByteSliceEnsureCapacity(t *testing.T) { ms.EnsureCapacity(2) assert.Equal(t, 4, cap(*ms.getOrig())) } + +func TestByteSliceTryIncrementFrom(t *testing.T) { + ms := NewByteSlice() + ms.FromRaw([]byte{10, 9}) + + ms2 := NewByteSlice() + ms2.FromRaw([]byte{1, 10}) + + assert.False(t, ms.TryIncrementFrom(ms2, 1)) + ms.EnsureCapacity(4) + assert.True(t, ms.TryIncrementFrom(ms2, 1)) + assert.Equal(t, byte(10), ms.At(0)) + assert.Equal(t, byte(10), ms.At(1)) + assert.Equal(t, byte(10), ms.At(2)) +} + +func TestByteSliceCollapse(t *testing.T) { + ms := 
NewByteSlice() + ms.FromRaw([]byte{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 0) + + assert.Equal(t, 2, ms.Len()) + assert.Equal(t, byte(4), ms.At(0)) + assert.Equal(t, byte(2), ms.At(1)) +} + +func TestByteSliceCollapseOffset(t *testing.T) { + ms := NewByteSlice() + ms.FromRaw([]byte{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 3) + + assert.Equal(t, 3, ms.Len()) + assert.Equal(t, byte(1), ms.At(0)) + assert.Equal(t, byte(4), ms.At(1)) + assert.Equal(t, byte(1), ms.At(2)) +} diff --git a/pdata/pcommon/generated_float64slice.go b/pdata/pcommon/generated_float64slice.go index 83a07ccf483..2713e6939e5 100644 --- a/pdata/pcommon/generated_float64slice.go +++ b/pdata/pcommon/generated_float64slice.go @@ -7,6 +7,8 @@ package pcommon import ( + "fmt" + "go.opentelemetry.io/collector/pdata/internal" ) @@ -106,3 +108,59 @@ func copyFloat64Slice(dst, src []float64) []float64 { dst = dst[:0] return append(dst, src...) } + +// TryIncrementFrom increments all elements from the current slice by the elements from another slice +// if it has enough capacity for the other slice's length plus the offset. +// If there isn't enough capacity, this method returns false and the slice is not mutated. 
+func (ms Float64Slice) TryIncrementFrom(other Float64Slice, offset int) bool { + if offset < 0 { + return false + } + ms.getState().AssertMutable() + newLen := max(ms.Len(), other.Len()+offset) + ours := *ms.getOrig() + if cap(ours) < newLen { + return false + } + ours = ours[:newLen] + theirs := *other.getOrig() + for i := 0; i < len(theirs); i++ { + ours[i+offset] += theirs[i] + } + *ms.getOrig() = ours + return true +} + +// Collapse merges (sums) n adjacent buckets and reslices to account for the decreased length +// +// n=2 offset=1 +// before: 1 1 1 1 1 1 1 1 +// V V V V V +// after: 1 2 2 2 1 +func (ms Float64Slice) Collapse(n, offset int) { + ms.getState().AssertMutable() + if offset >= n || offset < 0 { + panic(fmt.Sprintf("offset %d must be positive and smaller than n %d", offset, n)) + } + if n < 2 { + return + } + orig := *ms.getOrig() + newLen := (len(orig) + offset) / n + if (len(orig)+offset)%n != 0 { + newLen++ + } + + for i := 0; i < newLen; i++ { + if offset == 0 || i > 0 { + orig[i] = orig[i*n-offset] + } + for j := i*n + 1 - offset; j < i*n+n-offset && j < len(orig); j++ { + if j > 0 { + orig[i] += orig[j] + } + } + } + + *ms.getOrig() = orig[:newLen] +} diff --git a/pdata/pcommon/generated_float64slice_test.go b/pdata/pcommon/generated_float64slice_test.go index aa9b46201bc..94d82ad660a 100644 --- a/pdata/pcommon/generated_float64slice_test.go +++ b/pdata/pcommon/generated_float64slice_test.go @@ -81,3 +81,41 @@ func TestFloat64SliceEnsureCapacity(t *testing.T) { ms.EnsureCapacity(2) assert.Equal(t, 4, cap(*ms.getOrig())) } + +func TestFloat64SliceTryIncrementFrom(t *testing.T) { + ms := NewFloat64Slice() + ms.FromRaw([]float64{10, 9}) + + ms2 := NewFloat64Slice() + ms2.FromRaw([]float64{1, 10}) + + assert.False(t, ms.TryIncrementFrom(ms2, 1)) + ms.EnsureCapacity(4) + assert.True(t, ms.TryIncrementFrom(ms2, 1)) + assert.InDelta(t, float64(10), ms.At(0), 0.01) + assert.InDelta(t, float64(10), ms.At(1), 0.01) + assert.InDelta(t, float64(10), 
ms.At(2), 0.01) +} + +func TestFloat64SliceCollapse(t *testing.T) { + ms := NewFloat64Slice() + ms.FromRaw([]float64{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 0) + + assert.Equal(t, 2, ms.Len()) + assert.InDelta(t, float64(4), ms.At(0), 0.01) + assert.InDelta(t, float64(2), ms.At(1), 0.01) +} + +func TestFloat64SliceCollapseOffset(t *testing.T) { + ms := NewFloat64Slice() + ms.FromRaw([]float64{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 3) + + assert.Equal(t, 3, ms.Len()) + assert.InDelta(t, float64(1), ms.At(0), 0.01) + assert.InDelta(t, float64(4), ms.At(1), 0.01) + assert.InDelta(t, float64(1), ms.At(2), 0.01) +} diff --git a/pdata/pcommon/generated_int32slice.go b/pdata/pcommon/generated_int32slice.go index 35a40bd079c..c1854bbd743 100644 --- a/pdata/pcommon/generated_int32slice.go +++ b/pdata/pcommon/generated_int32slice.go @@ -7,6 +7,8 @@ package pcommon import ( + "fmt" + "go.opentelemetry.io/collector/pdata/internal" ) @@ -106,3 +108,59 @@ func copyInt32Slice(dst, src []int32) []int32 { dst = dst[:0] return append(dst, src...) } + +// TryIncrementFrom increments all elements from the current slice by the elements from another slice +// if it has enough capacity for the other slice's length plus the offset. +// If there isn't enough capacity, this method returns false and the slice is not mutated. 
+func (ms Int32Slice) TryIncrementFrom(other Int32Slice, offset int) bool { + if offset < 0 { + return false + } + ms.getState().AssertMutable() + newLen := max(ms.Len(), other.Len()+offset) + ours := *ms.getOrig() + if cap(ours) < newLen { + return false + } + ours = ours[:newLen] + theirs := *other.getOrig() + for i := 0; i < len(theirs); i++ { + ours[i+offset] += theirs[i] + } + *ms.getOrig() = ours + return true +} + +// Collapse merges (sums) n adjacent buckets and reslices to account for the decreased length +// +// n=2 offset=1 +// before: 1 1 1 1 1 1 1 1 +// V V V V V +// after: 1 2 2 2 1 +func (ms Int32Slice) Collapse(n, offset int) { + ms.getState().AssertMutable() + if offset >= n || offset < 0 { + panic(fmt.Sprintf("offset %d must be positive and smaller than n %d", offset, n)) + } + if n < 2 { + return + } + orig := *ms.getOrig() + newLen := (len(orig) + offset) / n + if (len(orig)+offset)%n != 0 { + newLen++ + } + + for i := 0; i < newLen; i++ { + if offset == 0 || i > 0 { + orig[i] = orig[i*n-offset] + } + for j := i*n + 1 - offset; j < i*n+n-offset && j < len(orig); j++ { + if j > 0 { + orig[i] += orig[j] + } + } + } + + *ms.getOrig() = orig[:newLen] +} diff --git a/pdata/pcommon/generated_int32slice_test.go b/pdata/pcommon/generated_int32slice_test.go index b6308fb91fd..5bab7aea423 100644 --- a/pdata/pcommon/generated_int32slice_test.go +++ b/pdata/pcommon/generated_int32slice_test.go @@ -81,3 +81,41 @@ func TestInt32SliceEnsureCapacity(t *testing.T) { ms.EnsureCapacity(2) assert.Equal(t, 4, cap(*ms.getOrig())) } + +func TestInt32SliceTryIncrementFrom(t *testing.T) { + ms := NewInt32Slice() + ms.FromRaw([]int32{10, 9}) + + ms2 := NewInt32Slice() + ms2.FromRaw([]int32{1, 10}) + + assert.False(t, ms.TryIncrementFrom(ms2, 1)) + ms.EnsureCapacity(4) + assert.True(t, ms.TryIncrementFrom(ms2, 1)) + assert.Equal(t, int32(10), ms.At(0)) + assert.Equal(t, int32(10), ms.At(1)) + assert.Equal(t, int32(10), ms.At(2)) +} + +func TestInt32SliceCollapse(t 
*testing.T) { + ms := NewInt32Slice() + ms.FromRaw([]int32{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 0) + + assert.Equal(t, 2, ms.Len()) + assert.Equal(t, int32(4), ms.At(0)) + assert.Equal(t, int32(2), ms.At(1)) +} + +func TestInt32SliceCollapseOffset(t *testing.T) { + ms := NewInt32Slice() + ms.FromRaw([]int32{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 3) + + assert.Equal(t, 3, ms.Len()) + assert.Equal(t, int32(1), ms.At(0)) + assert.Equal(t, int32(4), ms.At(1)) + assert.Equal(t, int32(1), ms.At(2)) +} diff --git a/pdata/pcommon/generated_int64slice.go b/pdata/pcommon/generated_int64slice.go index e50cd3cc3a5..67e0659b02a 100644 --- a/pdata/pcommon/generated_int64slice.go +++ b/pdata/pcommon/generated_int64slice.go @@ -7,6 +7,8 @@ package pcommon import ( + "fmt" + "go.opentelemetry.io/collector/pdata/internal" ) @@ -106,3 +108,59 @@ func copyInt64Slice(dst, src []int64) []int64 { dst = dst[:0] return append(dst, src...) } + +// TryIncrementFrom increments all elements from the current slice by the elements from another slice +// if it has enough capacity for the other slice's length plus the offset. +// If there isn't enough capacity, this method returns false and the slice is not mutated. 
+func (ms Int64Slice) TryIncrementFrom(other Int64Slice, offset int) bool { + if offset < 0 { + return false + } + ms.getState().AssertMutable() + newLen := max(ms.Len(), other.Len()+offset) + ours := *ms.getOrig() + if cap(ours) < newLen { + return false + } + ours = ours[:newLen] + theirs := *other.getOrig() + for i := 0; i < len(theirs); i++ { + ours[i+offset] += theirs[i] + } + *ms.getOrig() = ours + return true +} + +// Collapse merges (sums) n adjacent buckets and reslices to account for the decreased length +// +// n=2 offset=1 +// before: 1 1 1 1 1 1 1 1 +// V V V V V +// after: 1 2 2 2 1 +func (ms Int64Slice) Collapse(n, offset int) { + ms.getState().AssertMutable() + if offset >= n || offset < 0 { + panic(fmt.Sprintf("offset %d must be positive and smaller than n %d", offset, n)) + } + if n < 2 { + return + } + orig := *ms.getOrig() + newLen := (len(orig) + offset) / n + if (len(orig)+offset)%n != 0 { + newLen++ + } + + for i := 0; i < newLen; i++ { + if offset == 0 || i > 0 { + orig[i] = orig[i*n-offset] + } + for j := i*n + 1 - offset; j < i*n+n-offset && j < len(orig); j++ { + if j > 0 { + orig[i] += orig[j] + } + } + } + + *ms.getOrig() = orig[:newLen] +} diff --git a/pdata/pcommon/generated_int64slice_test.go b/pdata/pcommon/generated_int64slice_test.go index 868e2088ca9..36036e0b0d2 100644 --- a/pdata/pcommon/generated_int64slice_test.go +++ b/pdata/pcommon/generated_int64slice_test.go @@ -81,3 +81,41 @@ func TestInt64SliceEnsureCapacity(t *testing.T) { ms.EnsureCapacity(2) assert.Equal(t, 4, cap(*ms.getOrig())) } + +func TestInt64SliceTryIncrementFrom(t *testing.T) { + ms := NewInt64Slice() + ms.FromRaw([]int64{10, 9}) + + ms2 := NewInt64Slice() + ms2.FromRaw([]int64{1, 10}) + + assert.False(t, ms.TryIncrementFrom(ms2, 1)) + ms.EnsureCapacity(4) + assert.True(t, ms.TryIncrementFrom(ms2, 1)) + assert.Equal(t, int64(10), ms.At(0)) + assert.Equal(t, int64(10), ms.At(1)) + assert.Equal(t, int64(10), ms.At(2)) +} + +func TestInt64SliceCollapse(t 
*testing.T) { + ms := NewInt64Slice() + ms.FromRaw([]int64{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 0) + + assert.Equal(t, 2, ms.Len()) + assert.Equal(t, int64(4), ms.At(0)) + assert.Equal(t, int64(2), ms.At(1)) +} + +func TestInt64SliceCollapseOffset(t *testing.T) { + ms := NewInt64Slice() + ms.FromRaw([]int64{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 3) + + assert.Equal(t, 3, ms.Len()) + assert.Equal(t, int64(1), ms.At(0)) + assert.Equal(t, int64(4), ms.At(1)) + assert.Equal(t, int64(1), ms.At(2)) +} diff --git a/pdata/pcommon/generated_uint64slice.go b/pdata/pcommon/generated_uint64slice.go index 1344ca35bcf..2557f451750 100644 --- a/pdata/pcommon/generated_uint64slice.go +++ b/pdata/pcommon/generated_uint64slice.go @@ -7,6 +7,9 @@ package pcommon import ( + "fmt" + "iter" + "go.opentelemetry.io/collector/pdata/internal" ) @@ -49,6 +52,14 @@ func (ms UInt64Slice) Len() int { return len(*ms.getOrig()) } +func (ms UInt64Slice) Cap() int { + return cap(*ms.getOrig()) +} + +func (ms UInt64Slice) Reslice(from, to int) { + *ms.getOrig() = (*ms.getOrig())[from:to] +} + // At returns an item from particular index. // Equivalent of uInt64Slice[i]. func (ms UInt64Slice) At(i int) uint64 { @@ -106,3 +117,88 @@ func copyUInt64Slice(dst, src []uint64) []uint64 { dst = dst[:0] return append(dst, src...) } + +func (ms UInt64Slice) Transform(f func(i int, v uint64) uint64, from, to int) { + ms.getState().AssertMutable() + orig := *ms.getOrig() + for i := from; i < to; i++ { + orig[i] = f(i, orig[i]) + } +} + +// TryIncrementFrom increments all elements from the current slice by the elements from another slice +// if it has enough capacity for the other slice's length plus the offset. +// If there isn't enough capacity, this method returns false and the slice is not mutated. 
+func (ms UInt64Slice) TryIncrementFrom(other UInt64Slice, offset int) bool { + if offset < 0 { + return false + } + ms.getState().AssertMutable() + newLen := max(ms.Len(), other.Len()+offset) + ours := *ms.getOrig() + if cap(ours) < newLen { + return false + } + ours = ours[:newLen] + theirs := *other.getOrig() + for i := 0; i < len(theirs); i++ { + ours[i+offset] += theirs[i] + } + *ms.getOrig() = ours + return true +} + +func (ms UInt64Slice) IncrementFromSeq(other iter.Seq2[int, uint64], offset int) { + ms.getState().AssertMutable() + ours := *ms.getOrig() + for i, v := range other { + if i+offset >= len(ours) { + return + } + ours[i+offset] += v + } +} + +func (ms UInt64Slice) All() iter.Seq2[int, uint64] { + return func(yield func(int, uint64) bool) { + for i := 0; i < ms.Len(); i++ { + if !yield(i, ms.At(i)) { + return + } + } + } +} + +// Collapse merges (sums) n adjacent buckets and reslices to account for the decreased length +// +// n=2 offset=1 +// before: 1 1 1 1 1 1 1 1 +// V V V V V +// after: 1 2 2 2 1 +func (ms UInt64Slice) Collapse(n, offset int) { + ms.getState().AssertMutable() + if offset >= n || offset < 0 { + panic(fmt.Sprintf("offset %d must be positive and smaller than n %d", offset, n)) + } + if n < 2 { + return + } + orig := *ms.getOrig() + newLen := (len(orig) + offset) / n + if (len(orig)+offset)%n != 0 { + newLen++ + } + + for i := 0; i < newLen; i++ { + if offset == 0 || i > 0 { + orig[i] = orig[i*n-offset] + } + for j := i*n + 1 - offset; j < i*n+n-offset && j < len(orig); j++ { + if j > 0 { + orig[i] += orig[j] + } + } + } + + *ms.getOrig() = orig[:newLen] +} diff --git a/pdata/pcommon/generated_uint64slice_test.go b/pdata/pcommon/generated_uint64slice_test.go index 27013b985b9..40ecc0c4dcf 100644 --- a/pdata/pcommon/generated_uint64slice_test.go +++ b/pdata/pcommon/generated_uint64slice_test.go @@ -81,3 +81,129 @@ func TestUInt64SliceEnsureCapacity(t *testing.T) { ms.EnsureCapacity(2) assert.Equal(t, 4, cap(*ms.getOrig())) } + 
+func TestUInt64SliceTryIncrementFrom(t *testing.T) { + ms := NewUInt64Slice() + ms.FromRaw([]uint64{10, 9}) + + ms2 := NewUInt64Slice() + ms2.FromRaw([]uint64{1, 10}) + + assert.False(t, ms.TryIncrementFrom(ms2, 1)) + //assert.False(t, ms.tryIncrementFromWithCurrentFunctions(ms2, 1)) + //assert.False(t, ms.tryIncrementFromTransform(ms2, 1)) + //assert.False(t, ms.tryIncrementFromNewSlice(ms2, 1)) + ms.EnsureCapacity(4) + assert.True(t, ms.TryIncrementFrom(ms2, 1)) + //assert.True(t, ms.tryIncrementFromWithCurrentFunctions(ms2, 1)) + //assert.True(t, ms.tryIncrementFromTransform(ms2, 1)) + //assert.True(t, ms.tryIncrementFromNewSlice(ms2, 1)) + assert.Equal(t, uint64(10), ms.At(0)) + assert.Equal(t, uint64(10), ms.At(1)) + assert.Equal(t, uint64(10), ms.At(2)) +} + +func TestUInt64SliceCollapse(t *testing.T) { + ms := NewUInt64Slice() + ms.FromRaw([]uint64{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 0) + + assert.Equal(t, 2, ms.Len()) + assert.Equal(t, uint64(4), ms.At(0)) + assert.Equal(t, uint64(2), ms.At(1)) +} + +func TestUInt64SliceCollapseOffset(t *testing.T) { + ms := NewUInt64Slice() + ms.FromRaw([]uint64{1, 1, 1, 1, 1, 1}) + + ms.Collapse(4, 3) + + assert.Equal(t, 3, ms.Len()) + assert.Equal(t, uint64(1), ms.At(0)) + assert.Equal(t, uint64(4), ms.At(1)) + assert.Equal(t, uint64(1), ms.At(2)) +} + +func BenchmarkUInt64SliceTryIncrementFrom(b *testing.B) { + benchmarks := []struct { + name string + tryIncrementFrom func(s1, s2 UInt64Slice, offset int) bool + }{ + { + name: "TryIncrementFromDirectAccess", + tryIncrementFrom: UInt64Slice.TryIncrementFrom, + }, + { + name: "tryIncrementFromWithCurrentFunctions", + tryIncrementFrom: UInt64Slice.tryIncrementFromWithCurrentFunctions, + }, + { + name: "tryIncrementFromTransform", + tryIncrementFrom: UInt64Slice.tryIncrementFromTransform, + }, + { + name: "tryIncrementFromNewSlice", + tryIncrementFrom: UInt64Slice.tryIncrementFromNewSlice, + }, + { + name: "incrementFromSeq", + tryIncrementFrom: func(s1, s2 UInt64Slice, 
offset int) bool { + s1.IncrementFromSeq(s2.All(), offset) + return true + }, + }, + } + ms1 := NewUInt64Slice() + ms1.FromRaw(make([]uint64, 160)) + ms2 := NewUInt64Slice() + ms2.FromRaw(make([]uint64, 80)) + b.ResetTimer() + for _, bm := range benchmarks { + + b.Run(bm.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + bm.tryIncrementFrom(ms1, ms2, 40) + } + }) + } +} + +func (ms UInt64Slice) tryIncrementFromWithCurrentFunctions(s2 UInt64Slice, offset int) bool { + if ms.Cap() < s2.Len()+offset { + return false + } + ms.Reslice(0, max(ms.Len(), s2.Len()+offset)) + for i := 0; i < s2.Len(); i++ { + ms.SetAt(i+offset, ms.At(i+offset)+s2.At(i)) + } + return true +} + +func (ms UInt64Slice) tryIncrementFromTransform(s2 UInt64Slice, offset int) bool { + if ms.Cap() < s2.Len()+offset { + return false + } + ms.Reslice(0, max(ms.Len(), s2.Len()+offset)) + ms.Transform(func(i int, v uint64) uint64 { + return v + s2.At(i-offset) + }, offset, s2.Len()+offset) + return true +} + +func (ms UInt64Slice) tryIncrementFromNewSlice(s2 UInt64Slice, offset int) bool { + if ms.Cap() < s2.Len()+offset { + return false + } + + newSlice := make([]uint64, max(ms.Len(), s2.Len()+offset)) + for i := 0; i < ms.Len(); i++ { + newSlice[i] = ms.At(i) + } + for i := 0; i < s2.Len(); i++ { + newSlice[i+offset] += s2.At(i) + } + ms.FromRaw(newSlice) + return true +}