From c4cd8d1188a069947d35706c1418bfceba0977e1 Mon Sep 17 00:00:00 2001
From: Charles Korn
Date: Tue, 8 Oct 2024 11:56:16 +1100
Subject: [PATCH] MQE: only reduce resolution of delta in `rate` and `increase` once (#9531)

* Only reduce resolution of delta once if schema changes multiple times in range

* Add changelog entry
---
 CHANGELOG.md                                   |  2 +-
 pkg/streamingpromql/functions/rate_increase.go | 16 ++++++++++------
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c5e5ebbaa..9f9e3ca55f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,7 +14,7 @@
 * `cortex_alertmanager_alerts`
 * `cortex_alertmanager_silences`
 * [CHANGE] Cache: Deprecate experimental support for Redis as a cache backend. #9453
-* [FEATURE] Querier: add experimental streaming PromQL engine, enabled with `-querier.query-engine=mimir`. #9367 #9368 #9398 #9399 #9403 #9417 #9418 #9419 #9420 #9482 #9504 #9505 #9507 #9518 #9532 #9533
+* [FEATURE] Querier: add experimental streaming PromQL engine, enabled with `-querier.query-engine=mimir`. #9367 #9368 #9398 #9399 #9403 #9417 #9418 #9419 #9420 #9482 #9504 #9505 #9507 #9518 #9531 #9532 #9533
 * [FEATURE] Query-frontend: added experimental configuration options `query-frontend.cache-errors` and `query-frontend.results-cache-ttl-for-errors` to allow non-transient responses to be cached. When set to `true` error responses from hitting limits or bad data are cached for a short TTL. #9028
 * [FEATURE] gRPC: Support S2 compression. #9322
 * `-alertmanager.alertmanager-client.grpc-compression=s2`
diff --git a/pkg/streamingpromql/functions/rate_increase.go b/pkg/streamingpromql/functions/rate_increase.go
index 3e79d41464..13465c62a4 100644
--- a/pkg/streamingpromql/functions/rate_increase.go
+++ b/pkg/streamingpromql/functions/rate_increase.go
@@ -84,9 +84,9 @@ func histogramRate(isRate bool, step types.RangeVectorStepData, hHead []promql.H
 		emitAnnotation(annotations.NewNativeHistogramNotCounterWarning)
 	}
 
-	initialSchema := firstPoint.H.Schema
-	if lastPoint.H.Schema < initialSchema {
-		initialSchema = lastPoint.H.Schema
+	desiredSchema := firstPoint.H.Schema
+	if lastPoint.H.Schema < desiredSchema {
+		desiredSchema = lastPoint.H.Schema
 	}
 
 	usingCustomBuckets := firstPoint.H.UsesCustomBuckets()
@@ -94,7 +94,7 @@ func histogramRate(isRate bool, step types.RangeVectorStepData, hHead []promql.H
 		return nil, histogram.ErrHistogramsIncompatibleSchema
 	}
 
-	delta := lastPoint.H.CopyToSchema(initialSchema)
+	delta := lastPoint.H.CopyToSchema(desiredSchema)
 	_, err := delta.Sub(firstPoint.H)
 	if err != nil {
 		return nil, err
@@ -114,8 +114,8 @@ func histogramRate(isRate bool, step types.RangeVectorStepData, hHead []promql.H
 			return histogram.ErrHistogramsIncompatibleSchema
 		}
 
-		if p.H.Schema < delta.Schema {
-			delta = delta.CopyToSchema(p.H.Schema)
+		if p.H.Schema < desiredSchema {
+			desiredSchema = p.H.Schema
 		}
 
 		if p.H.CounterResetHint == histogram.GaugeType {
@@ -138,6 +138,10 @@ func histogramRate(isRate bool, step types.RangeVectorStepData, hHead []promql.H
 	}
 
+	if delta.Schema != desiredSchema {
+		delta = delta.CopyToSchema(desiredSchema)
+	}
+
 	val := calculateHistogramRate(isRate, step.RangeStart, step.RangeEnd, rangeSeconds, firstPoint, lastPoint, delta, hCount)
 	return val, err
 }
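
Note (not part of the patch): the diff tracks the coarsest schema seen across the whole range in desiredSchema and converts delta to that schema once, after all points have been examined, rather than calling CopyToSchema every time a point with a lower schema appears. The sketch below is a minimal, self-contained restatement of that pattern; the hist type, field names, and sample values are hypothetical stand-ins for Prometheus's histogram.FloatHistogram, not the engine's actual code.

package main

import "fmt"

// hist is a hypothetical stand-in for a native histogram; only the schema
// (bucket resolution) matters for this illustration.
type hist struct {
	schema int32
}

// copyToSchema models reducing a histogram's resolution to targetSchema.
// Each call represents the conversion the patch wants to perform only once.
func (h *hist) copyToSchema(targetSchema int32) *hist {
	fmt.Printf("converting delta from schema %d to %d\n", h.schema, targetSchema)
	return &hist{schema: targetSchema}
}

func main() {
	// Hypothetical samples within the range, with varying schemas.
	points := []*hist{{schema: 3}, {schema: 1}, {schema: 2}, {schema: 0}}
	delta := &hist{schema: 3}

	// Converting inside the loop whenever a point has a lower schema can
	// reduce the delta's resolution several times per range. Instead, only
	// remember the coarsest (lowest) schema seen...
	desiredSchema := delta.schema
	for _, p := range points {
		if p.schema < desiredSchema {
			desiredSchema = p.schema
		}
	}

	// ...and reduce the delta's resolution a single time at the end.
	if delta.schema != desiredSchema {
		delta = delta.copyToSchema(desiredSchema)
	}

	fmt.Printf("final delta schema: %d\n", delta.schema)
}

With per-point conversion this input would reduce the delta twice (schema 3 to 1, then 1 to 0); with the pattern the patch applies it is reduced once, directly to schema 0.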