From 6f4e4608fb0044178b0ade12943d538efb7385a8 Mon Sep 17 00:00:00 2001
From: Sindy Li
Date: Mon, 13 Jan 2025 18:09:34 -0800
Subject: [PATCH] Benchmarks and optimizations

---
 exporter/exporterhelper/logs.go            |  5 +-
 exporter/exporterhelper/logs_batch.go      |  9 ++-
 exporter/exporterhelper/logs_batch_test.go | 68 ++++++++++++++++++----
 3 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/exporter/exporterhelper/logs.go b/exporter/exporterhelper/logs.go
index a19d65525e0f..24cac9552a2d 100644
--- a/exporter/exporterhelper/logs.go
+++ b/exporter/exporterhelper/logs.go
@@ -26,8 +26,9 @@ var (
 )
 
 type logsRequest struct {
-	ld     plog.Logs
-	pusher consumer.ConsumeLogsFunc
+	ld       plog.Logs
+	pusher   consumer.ConsumeLogsFunc
+	byteSize int
 }
 
 func newLogsRequest(ld plog.Logs, pusher consumer.ConsumeLogsFunc) Request {
diff --git a/exporter/exporterhelper/logs_batch.go b/exporter/exporterhelper/logs_batch.go
index 3bd7dc472f9f..0ae7a13a2e55 100644
--- a/exporter/exporterhelper/logs_batch.go
+++ b/exporter/exporterhelper/logs_batch.go
@@ -46,12 +46,19 @@ func (req *logsRequest) mergeSplitBasedOnByteSize(cfg exporterbatcher.MaxSizeCon
 			continue
 		}
 
-		ByteSize := srcReq.ld.ByteSize()
+		// Reuse the cached size when available: ld.ByteSize() walks the
+		// entire payload, so recomputing it on every merge is expensive.
+		ByteSize := srcReq.byteSize
+		if ByteSize == 0 {
+			ByteSize = srcReq.ld.ByteSize()
+		}
 		if ByteSize <= capacityLeft {
 			if destReq == nil {
 				destReq = srcReq
+				destReq.byteSize = ByteSize // seed the cache so later increments stay accurate
 			} else {
 				srcReq.ld.ResourceLogs().MoveAndAppendTo(destReq.ld.ResourceLogs())
+				destReq.byteSize += ByteSize
 			}
 			capacityLeft -= ByteSize
 			continue
diff --git a/exporter/exporterhelper/logs_batch_test.go b/exporter/exporterhelper/logs_batch_test.go
index 5fe2920034f8..7c9312ffc93f 100644
--- a/exporter/exporterhelper/logs_batch_test.go
+++ b/exporter/exporterhelper/logs_batch_test.go
@@ -252,42 +252,86 @@ func TestMergeSplitLogsBasedOnByteSize(t *testing.T) {
 	}
 }
 
-func BenchmarkSplittingBasedOnItemCountManyLogs(b *testing.B) {
-	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}
+func BenchmarkSplittingBasedOnItemCountManySmallLogs(b *testing.B) {
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
 	for i := 0; i < b.N; i++ {
-		lr1 := &logsRequest{ld: testdata.GenerateLogs(9)}
+		lr1 := &logsRequest{ld: testdata.GenerateLogs(10)}
 		for j := 0; j < 1000; j++ {
-			lr2 := &logsRequest{ld: testdata.GenerateLogs(9)}
+			lr2 := &logsRequest{ld: testdata.GenerateLogs(10)}
 			lr1.MergeSplit(context.Background(), cfg, lr2)
 		}
 	}
 }
 
-func BenchmarkSplittingBasedOnByteSizeManyLogs(b *testing.B) {
-	cfg := exporterbatcher.MaxSizeConfig{MaxSizeBytes: 1010}
+func BenchmarkSplittingBasedOnByteSizeManySmallLogs(b *testing.B) {
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeBytes: 1010000}
 	for i := 0; i < b.N; i++ {
-		lr1 := &logsRequest{ld: testdata.GenerateLogs(9)}
+		lr1 := &logsRequest{ld: testdata.GenerateLogs(10)}
 		for j := 0; j < 1000; j++ {
-			lr2 := &logsRequest{ld: testdata.GenerateLogs(9)}
+			lr2 := &logsRequest{ld: testdata.GenerateLogs(10)}
 			lr1.MergeSplit(context.Background(), cfg, lr2)
 		}
 	}
 }
 
+func BenchmarkSplittingBasedOnItemCountManyLogsSlightlyAboveLimit(b *testing.B) {
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
+	for i := 0; i < b.N; i++ {
+		lr1 := &logsRequest{ld: testdata.GenerateLogs(10001)}
+		for j := 0; j < 10; j++ {
+			lr2 := &logsRequest{ld: testdata.GenerateLogs(10001)}
+			lr1.MergeSplit(context.Background(), cfg, lr2)
+		}
+	}
+}
+
+func BenchmarkSplittingBasedOnByteSizeManyLogsSlightlyAboveLimit(b *testing.B) {
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeBytes: 960052} // 960052 is the byte size of 10000 generated logs
+	for i := 0; i < b.N; i++ {
+		lr1 := &logsRequest{ld: testdata.GenerateLogs(10001)}
+		for j := 0; j < 10; j++ {
+			lr2 := &logsRequest{ld: testdata.GenerateLogs(10001)}
+			lr1.MergeSplit(context.Background(), cfg, lr2)
+		}
+	}
+}
+
+func BenchmarkSplittingBasedOnItemCountManyLogsSlightlyBelowLimit(b *testing.B) {
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
+	for i := 0; i < b.N; i++ {
+		lr1 := &logsRequest{ld: testdata.GenerateLogs(9999)}
+		for j := 0; j < 10; j++ {
+			lr2 := &logsRequest{ld: testdata.GenerateLogs(9999)}
+			lr1.MergeSplit(context.Background(), cfg, lr2)
+		}
+	}
+}
+
+func BenchmarkSplittingBasedOnByteSizeManyLogsSlightlyBelowLimit(b *testing.B) {
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeBytes: 960052} // 960052 is the byte size of 10000 generated logs
+	for i := 0; i < b.N; i++ {
+		lr1 := &logsRequest{ld: testdata.GenerateLogs(9999)}
+		for j := 0; j < 10; j++ {
+			lr2 := &logsRequest{ld: testdata.GenerateLogs(9999)}
+			lr1.MergeSplit(context.Background(), cfg, lr2)
+		}
+	}
+}
+
 func BenchmarkSplittingBasedOnItemCountHugeLog(b *testing.B) {
-	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeItems: 10000}
 	for i := 0; i < b.N; i++ {
 		lr1 := &logsRequest{ld: testdata.GenerateLogs(1)}
-		lr2 := &logsRequest{ld: testdata.GenerateLogs(1000)}
+		lr2 := &logsRequest{ld: testdata.GenerateLogs(100000)} // lr2 is 9.600054 MB
 		lr1.MergeSplit(context.Background(), cfg, lr2)
 	}
 }
 
 func BenchmarkSplittingBasedOnByteSizeHugeLog(b *testing.B) {
-	cfg := exporterbatcher.MaxSizeConfig{MaxSizeBytes: 1010}
+	cfg := exporterbatcher.MaxSizeConfig{MaxSizeBytes: 970000}
 	for i := 0; i < b.N; i++ {
 		lr1 := &logsRequest{ld: testdata.GenerateLogs(1)}
-		lr2 := &logsRequest{ld: testdata.GenerateLogs(1000)}
+		lr2 := &logsRequest{ld: testdata.GenerateLogs(100000)}
 		lr1.MergeSplit(context.Background(), cfg, lr2)
 	}
 }
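
Note on the optimization: the patch avoids recomputing plog.Logs.ByteSize()
on every merge by caching the value in logsRequest.byteSize. Below is a
minimal, self-contained sketch of that compute-once pattern; all names are
hypothetical stand-ins (request for logsRequest, len(payload) for the
expensive ld.ByteSize() walk), not the collector's actual API:

	package main

	import "fmt"

	// request mimics logsRequest: a payload plus a cached byte size,
	// where 0 means "not computed yet".
	type request struct {
		payload  []byte
		byteSize int
	}

	// size returns the cached byte size, computing it once on first use.
	func (r *request) size() int {
		if r.byteSize == 0 {
			r.byteSize = len(r.payload) // stand-in for ld.ByteSize()
		}
		return r.byteSize
	}

	// merge appends src into dst and keeps the cache consistent, mirroring
	// the destReq.byteSize += ByteSize bookkeeping in the patch.
	func merge(dst, src *request) {
		total := dst.size() + src.size() // compute before the payloads move
		dst.payload = append(dst.payload, src.payload...)
		dst.byteSize = total
	}

	func main() {
		a := &request{payload: []byte("hello ")}
		b := &request{payload: []byte("world")}
		merge(a, b)
		fmt.Println(a.byteSize, len(a.payload)) // 11 11
	}

Seeding the cache when destReq is first assigned (destReq.byteSize = ByteSize)
matters: otherwise a destination whose size was just computed would keep a
zero cache, and the later += increments would undercount the merged batch.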