From 557479504cc0c77b5119537a739113ee0215b850 Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Mon, 23 Sep 2024 08:06:36 +0100 Subject: [PATCH 01/33] MQE: fix `rate()` over native histograms where first point in range is a counter reset (#9371) (#9372) * MQE: fix `rate()` over native histograms where first point is a counter reset * Add changelog entry (cherry picked from commit 20a3c74bb82962e93f46aecd3b7d8916c58e4aaf) Co-authored-by: Charles Korn --- CHANGELOG.md | 2 +- pkg/streamingpromql/functions/rate.go | 15 +++++++++++---- .../testdata/ours/native_histograms.test | 9 +++++++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 96b7671bc54..c7f5c09814c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,7 +34,7 @@ * [CHANGE] Distributor: reject incoming requests until the distributor service has started. #9317 * [FEATURE] Alertmanager: Added `-alertmanager.log-parsing-label-matchers` to control logging when parsing label matchers. This flag is intended to be used with `-alertmanager.utf8-strict-mode-enabled` to validate UTF-8 strict mode is working as intended. The default value is `false`. #9173 * [FEATURE] Alertmanager: Added `-alertmanager.utf8-migration-logging-enabled` to enable logging of tenant configurations that are incompatible with UTF-8 strict mode. The default value is `false`. #9174 -* [FEATURE] Querier: add experimental streaming PromQL engine, enabled with `-querier.query-engine=mimir`. #8422 #8430 #8454 #8455 #8360 #8490 #8508 #8577 #8660 #8671 #8677 #8747 #8850 #8872 #8838 #8911 #8909 #8923 #8924 #8925 #8932 #8933 #8934 #8962 #8986 #8993 #8995 #9008 #9017 #9018 #9019 #9120 #9121 #9136 #9139 #9140 #9145 #9191 #9192 #9194 #9196 #9201 #9212 #9225 #9260 #9272 #9277 #9278 #9280 #9281 #9342 #9343 +* [FEATURE] Querier: add experimental streaming PromQL engine, enabled with `-querier.query-engine=mimir`. 
#8422 #8430 #8454 #8455 #8360 #8490 #8508 #8577 #8660 #8671 #8677 #8747 #8850 #8872 #8838 #8911 #8909 #8923 #8924 #8925 #8932 #8933 #8934 #8962 #8986 #8993 #8995 #9008 #9017 #9018 #9019 #9120 #9121 #9136 #9139 #9140 #9145 #9191 #9192 #9194 #9196 #9201 #9212 #9225 #9260 #9272 #9277 #9278 #9280 #9281 #9342 #9343 #9371 * [FEATURE] Experimental Kafka-based ingest storage. #6888 #6894 #6929 #6940 #6951 #6974 #6982 #7029 #7030 #7091 #7142 #7147 #7148 #7153 #7160 #7193 #7349 #7376 #7388 #7391 #7393 #7394 #7402 #7404 #7423 #7424 #7437 #7486 #7503 #7508 #7540 #7621 #7682 #7685 #7694 #7695 #7696 #7697 #7701 #7733 #7734 #7741 #7752 #7838 #7851 #7871 #7877 #7880 #7882 #7887 #7891 #7925 #7955 #7967 #8031 #8063 #8077 #8088 #8135 #8176 #8184 #8194 #8216 #8217 #8222 #8233 #8503 #8542 #8579 #8657 #8686 #8688 #8703 #8706 #8708 #8738 #8750 #8778 #8808 #8809 #8841 #8842 #8845 #8853 #8886 #8988 * What it is: * When the new ingest storage architecture is enabled, distributors write incoming write requests to a Kafka-compatible backend, and the ingesters asynchronously replay ingested data from Kafka. In this architecture, the write and read path are de-coupled through a Kafka-compatible backend. The write path and Kafka load is a function of the incoming write traffic, the read path load is a function of received queries. Whatever the load on the read path, it doesn't affect the write path. 
diff --git a/pkg/streamingpromql/functions/rate.go b/pkg/streamingpromql/functions/rate.go index 3352d84c0aa..10dce2f01ca 100644 --- a/pkg/streamingpromql/functions/rate.go +++ b/pkg/streamingpromql/functions/rate.go @@ -42,7 +42,7 @@ func rate(step types.RangeVectorStepData, rangeSeconds float64, floatBuffer *typ } if hCount >= 2 { - val, err := histogramRate(histogramBuffer, step, hHead, hTail, rangeSeconds, hCount, emitAnnotation) + val, err := histogramRate(step, hHead, hTail, rangeSeconds, hCount, emitAnnotation) if err != nil { err = NativeHistogramErrorToAnnotation(err, emitAnnotation) return 0, false, nil, err @@ -53,9 +53,15 @@ func rate(step types.RangeVectorStepData, rangeSeconds float64, floatBuffer *typ return 0, false, nil, nil } -func histogramRate(histogramBuffer *types.HPointRingBuffer, step types.RangeVectorStepData, hHead []promql.HPoint, hTail []promql.HPoint, rangeSeconds float64, hCount int, emitAnnotation EmitAnnotationFunc) (*histogram.FloatHistogram, error) { - firstPoint := histogramBuffer.First() - usingCustomBuckets := firstPoint.H.UsesCustomBuckets() +func histogramRate(step types.RangeVectorStepData, hHead []promql.HPoint, hTail []promql.HPoint, rangeSeconds float64, hCount int, emitAnnotation EmitAnnotationFunc) (*histogram.FloatHistogram, error) { + var firstPoint promql.HPoint + if len(hHead) > 0 { + firstPoint = hHead[0] + hHead = hHead[1:] + } else { + firstPoint = hTail[0] + hTail = hTail[1:] + } var lastPoint promql.HPoint if len(hTail) > 0 { @@ -73,6 +79,7 @@ func histogramRate(histogramBuffer *types.HPointRingBuffer, step types.RangeVect currentSchema = lastPoint.H.Schema } + usingCustomBuckets := firstPoint.H.UsesCustomBuckets() if lastPoint.H.UsesCustomBuckets() != usingCustomBuckets { return nil, histogram.ErrHistogramsIncompatibleSchema } diff --git a/pkg/streamingpromql/testdata/ours/native_histograms.test b/pkg/streamingpromql/testdata/ours/native_histograms.test index ce4be635e22..8e2c64b4e61 100644 --- 
a/pkg/streamingpromql/testdata/ours/native_histograms.test +++ b/pkg/streamingpromql/testdata/ours/native_histograms.test @@ -147,6 +147,15 @@ eval range from 0 to 4m step 1m sum by (env) (rate(incr_histogram[5m])) clear +# Test rate() with explicit counter reset information. +load 1m + metric {{sum:3 count:4 buckets:[1 2 1] counter_reset_hint:reset}} {{sum:63 count:124 buckets:[100 4 20] counter_reset_hint:not_reset}} + +eval instant at 1m rate(metric[1m]) + {} {{sum:1 count:2 buckets:[1.65 0.03333333333333333 0.31666666666666665] counter_reset_hint:gauge}} + +clear + # Test mixing exponential and custom buckets. load 6m metric{series="exponential"} {{sum:4 count:5 buckets:[1 3 1]}} _ {{sum:4 count:5 buckets:[1 3 1]}} {{sum:4 count:5 buckets:[1 3 1]}} _ From 2a82d019648bbbfe79f0e55e18a5e2b52b3f7067 Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Thu, 26 Sep 2024 14:08:57 +0100 Subject: [PATCH 02/33] Clean up deprecated features for 2.14 (#9407) (#9426) * ingester, distributor: remove deprecated limit-inflight-requests-using-grpc-method-limiter Signed-off-by: Vladimir Varankin * querier: remove deprecated -querier.max-query-into-future Signed-off-by: Vladimir Varankin * update CHANGELOG Signed-off-by: Vladimir Varankin * rebuild assets Signed-off-by: Vladimir Varankin --------- Signed-off-by: Vladimir Varankin (cherry picked from commit 286c23cc6bc77bbcc20cd34df50003395a6ef7c8) Co-authored-by: Vladimir Varankin --- CHANGELOG.md | 2 + cmd/mimir/config-descriptor.json | 33 -- cmd/mimir/help-all.txt.tmpl | 6 - cmd/mimir/main_test.go | 6 +- .../configuration-parameters/index.md | 14 - pkg/distributor/distributor.go | 6 +- pkg/ingester/ingester.go | 5 +- pkg/ingester/ingester_test.go | 390 +++++++----------- pkg/mimir/modules.go | 26 +- pkg/querier/querier.go | 31 +- pkg/querier/querier_test.go | 66 ++- 11 files changed, 212 insertions(+), 373 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
c7f5c09814c..dc795b5d2e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ * [CHANGE] Querier: allow wrapping errors with context errors only when the former actually correspond to `context.Canceled` and `context.DeadlineExceeded`. #9175 * [CHANGE] Query-scheduler: Remove the experimental `-query-scheduler.use-multi-algorithm-query-queue` flag. The new multi-algorithm tree queue is always used for the scheduler. #9210 * [CHANGE] Distributor: reject incoming requests until the distributor service has started. #9317 +* [CHANGE] Ingester, Distributor: Remove deprecated `-ingester.limit-inflight-requests-using-grpc-method-limiter` and `-distributor.limit-inflight-requests-using-grpc-method-limiter`. The feature was deprecated and enabled by default in Mimir 2.12. #9407 +* [CHANGE] Querier: Remove deprecated `-querier.max-query-into-future`. The feature was deprecated in Mimir 2.12. #9407 * [FEATURE] Alertmanager: Added `-alertmanager.log-parsing-label-matchers` to control logging when parsing label matchers. This flag is intended to be used with `-alertmanager.utf8-strict-mode-enabled` to validate UTF-8 strict mode is working as intended. The default value is `false`. #9173 * [FEATURE] Alertmanager: Added `-alertmanager.utf8-migration-logging-enabled` to enable logging of tenant configurations that are incompatible with UTF-8 strict mode. The default value is `false`. #9174 * [FEATURE] Querier: add experimental streaming PromQL engine, enabled with `-querier.query-engine=mimir`. 
#8422 #8430 #8454 #8455 #8360 #8490 #8508 #8577 #8660 #8671 #8677 #8747 #8850 #8872 #8838 #8911 #8909 #8923 #8924 #8925 #8932 #8933 #8934 #8962 #8986 #8993 #8995 #9008 #9017 #9018 #9019 #9120 #9121 #9136 #9139 #9140 #9145 #9191 #9192 #9194 #9196 #9201 #9212 #9225 #9260 #9272 #9277 #9278 #9280 #9281 #9342 #9343 #9371 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index a19108b5964..2702c82f07c 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1680,17 +1680,6 @@ "fieldType": "boolean", "fieldCategory": "experimental" }, - { - "kind": "field", - "name": "limit_inflight_requests_using_grpc_method_limiter", - "required": false, - "desc": "When enabled, in-flight write requests limit is checked as soon as the gRPC request is received, before the request is decoded and parsed.", - "fieldValue": null, - "fieldDefaultValue": true, - "fieldFlag": "distributor.limit-inflight-requests-using-grpc-method-limiter", - "fieldType": "boolean", - "fieldCategory": "deprecated" - }, { "kind": "field", "name": "reusable_ingester_push_workers", @@ -1734,17 +1723,6 @@ "fieldType": "duration", "fieldCategory": "advanced" }, - { - "kind": "field", - "name": "max_query_into_future", - "required": false, - "desc": "Maximum duration into the future you can query. 
0 to disable.", - "fieldValue": null, - "fieldDefaultValue": 600000000000, - "fieldFlag": "querier.max-query-into-future", - "fieldType": "duration", - "fieldCategory": "deprecated" - }, { "kind": "block", "name": "store_gateway_client", @@ -3425,17 +3403,6 @@ "fieldType": "boolean", "fieldCategory": "experimental" }, - { - "kind": "field", - "name": "limit_inflight_requests_using_grpc_method_limiter", - "required": false, - "desc": "When enabled, in-flight write requests limit is checked as soon as the gRPC request is received, before the request is decoded and parsed.", - "fieldValue": null, - "fieldDefaultValue": true, - "fieldFlag": "ingester.limit-inflight-requests-using-grpc-method-limiter", - "fieldType": "boolean", - "fieldCategory": "deprecated" - }, { "kind": "field", "name": "error_sample_rate", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index e6477b91821..2b0b1f28dfe 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1227,8 +1227,6 @@ Usage of ./cmd/mimir/mimir: The sum of the request sizes in bytes of inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited. -distributor.instance-limits.max-ingestion-rate float Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited. - -distributor.limit-inflight-requests-using-grpc-method-limiter - [deprecated] When enabled, in-flight write requests limit is checked as soon as the gRPC request is received, before the request is decoded and parsed. (default true) -distributor.max-exemplars-per-series-per-request int [experimental] Maximum number of exemplars per series per request. 0 to disable limit in request. The exceeding exemplars are dropped. 
-distributor.max-otlp-request-size int @@ -1455,8 +1453,6 @@ Usage of ./cmd/mimir/mimir: Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. 0 = unlimited. -ingester.instance-limits.max-tenants int Max tenants that this ingester can hold. Requests from additional tenants will be rejected. 0 = unlimited. - -ingester.limit-inflight-requests-using-grpc-method-limiter - [deprecated] When enabled, in-flight write requests limit is checked as soon as the gRPC request is received, before the request is decoded and parsed. (default true) -ingester.log-utilization-based-limiter-cpu-samples [experimental] Enable logging of utilization based limiter CPU samples. -ingester.max-global-exemplars-per-user int @@ -1917,8 +1913,6 @@ Usage of ./cmd/mimir/mimir: Maximum number of outstanding requests per tenant per frontend; requests beyond this error with HTTP 429. (default 100) -querier.max-partial-query-length duration Limit the time range for partial queries at the querier level. - -querier.max-query-into-future duration - [deprecated] Maximum duration into the future you can query. 0 to disable. (default 10m0s) -querier.max-query-lookback duration Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler for instant, range and remote read queries. For metadata queries like series, label names, label values queries the limit is enforced in the querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable. 
-querier.max-query-parallelism int diff --git a/cmd/mimir/main_test.go b/cmd/mimir/main_test.go index 4b863f2be89..4de3d9f0e22 100644 --- a/cmd/mimir/main_test.go +++ b/cmd/mimir/main_test.go @@ -104,12 +104,12 @@ func TestFlagParsing(t *testing.T) { defaults := mimir.Config{} flagext.DefaultValues(&defaults) - require.NotZero(t, defaults.Querier.MaxQueryIntoFuture, - "This test asserts that mimir.Config.Querier.MaxQueryIntoFuture default value is not zero. "+ + require.NotZero(t, defaults.Querier.QueryStoreAfter, + "This test asserts that mimir.Config.Querier.QueryStoreAfter default value is not zero. "+ "If it's zero, this test is useless. Please change it to use a config value with a non-zero default.", ) - require.Equal(t, cfg.Querier.MaxQueryIntoFuture, defaults.Querier.MaxQueryIntoFuture, + require.Equal(t, cfg.Querier.QueryStoreAfter, defaults.Querier.QueryStoreAfter, "YAML parser has set the [entire] Querier config to zero values by specifying an empty node."+ "If this happens again, check git history on how this was checked with previous YAML parser implementation.") }, diff --git a/docs/sources/mimir/configure/configuration-parameters/index.md b/docs/sources/mimir/configure/configuration-parameters/index.md index 0a345b3ee47..01742a22316 100644 --- a/docs/sources/mimir/configure/configuration-parameters/index.md +++ b/docs/sources/mimir/configure/configuration-parameters/index.md @@ -948,11 +948,6 @@ instance_limits: # CLI flag: -distributor.write-requests-buffer-pooling-enabled [write_requests_buffer_pooling_enabled: | default = true] -# (deprecated) When enabled, in-flight write requests limit is checked as soon -# as the gRPC request is received, before the request is decoded and parsed. -# CLI flag: -distributor.limit-inflight-requests-using-grpc-method-limiter -[limit_inflight_requests_using_grpc_method_limiter: | default = true] - # (advanced) Number of pre-allocated workers used to forward push requests to # the ingesters. 
If 0, no workers will be used and a new goroutine will be # spawned for each ingester push request. If not enough workers available, new @@ -1251,11 +1246,6 @@ instance_limits: # CLI flag: -ingester.log-utilization-based-limiter-cpu-samples [log_utilization_based_limiter_cpu_samples: | default = false] -# (deprecated) When enabled, in-flight write requests limit is checked as soon -# as the gRPC request is received, before the request is decoded and parsed. -# CLI flag: -ingester.limit-inflight-requests-using-grpc-method-limiter -[limit_inflight_requests_using_grpc_method_limiter: | default = true] - # (advanced) Each error will be logged once in this many times. Use 0 to log all # of them. # CLI flag: -ingester.error-sample-rate @@ -1365,10 +1355,6 @@ The `querier` block configures the querier. # CLI flag: -querier.query-store-after [query_store_after: | default = 12h] -# (deprecated) Maximum duration into the future you can query. 0 to disable. -# CLI flag: -querier.max-query-into-future -[max_query_into_future: | default = 10m] - store_gateway_client: # (advanced) Enable TLS for gRPC client connecting to store-gateway. # CLI flag: -querier.store-gateway-client.tls-enabled diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 0302b6bd5ad..47657fde0f9 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -220,9 +220,8 @@ type Config struct { // These functions will only receive samples that don't get dropped by HA deduplication. 
PushWrappers []PushWrapper `yaml:"-"` - WriteRequestsBufferPoolingEnabled bool `yaml:"write_requests_buffer_pooling_enabled" category:"experimental"` - LimitInflightRequestsUsingGrpcMethodLimiter bool `yaml:"limit_inflight_requests_using_grpc_method_limiter" category:"deprecated"` // TODO Remove the configuration option in Mimir 2.14, keeping the same behavior as if it's enabled - ReusableIngesterPushWorkers int `yaml:"reusable_ingester_push_workers" category:"advanced"` + WriteRequestsBufferPoolingEnabled bool `yaml:"write_requests_buffer_pooling_enabled" category:"experimental"` + ReusableIngesterPushWorkers int `yaml:"reusable_ingester_push_workers" category:"advanced"` // DirectOTLPTranslationEnabled allows reverting to the older way of translating from OTLP write requests via Prometheus, in case of problems. DirectOTLPTranslationEnabled bool `yaml:"direct_otlp_translation_enabled" category:"experimental"` @@ -243,7 +242,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { f.IntVar(&cfg.MaxRequestPoolBufferSize, "distributor.max-request-pool-buffer-size", 0, "Max size of the pooled buffers used for marshaling write requests. If 0, no max size is enforced.") f.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.") f.BoolVar(&cfg.WriteRequestsBufferPoolingEnabled, "distributor.write-requests-buffer-pooling-enabled", true, "Enable pooling of buffers used for marshaling write requests.") - f.BoolVar(&cfg.LimitInflightRequestsUsingGrpcMethodLimiter, "distributor.limit-inflight-requests-using-grpc-method-limiter", true, "When enabled, in-flight write requests limit is checked as soon as the gRPC request is received, before the request is decoded and parsed.") f.IntVar(&cfg.ReusableIngesterPushWorkers, "distributor.reusable-ingester-push-workers", 2000, "Number of pre-allocated workers used to forward push requests to the ingesters. 
If 0, no workers will be used and a new goroutine will be spawned for each ingester push request. If not enough workers available, new goroutine will be spawned. (Note: this is a performance optimization, not a limiting feature.)") f.BoolVar(&cfg.DirectOTLPTranslationEnabled, "distributor.direct-otlp-translation-enabled", true, "When enabled, OTLP write requests are directly translated to Mimir equivalents, for optimum performance.") diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 14fffdb761e..93462209303 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -199,8 +199,6 @@ type Config struct { ReadPathMemoryUtilizationLimit uint64 `yaml:"read_path_memory_utilization_limit" category:"experimental"` LogUtilizationBasedLimiterCPUSamples bool `yaml:"log_utilization_based_limiter_cpu_samples" category:"experimental"` - LimitInflightRequestsUsingGrpcMethodLimiter bool `yaml:"limit_inflight_requests_using_grpc_method_limiter" category:"deprecated"` // TODO Remove the configuration option in Mimir 2.14, keeping the same behavior as if it's enabled. - ErrorSampleRate int64 `yaml:"error_sample_rate" json:"error_sample_rate" category:"advanced"` UseIngesterOwnedSeriesForLimits bool `yaml:"use_ingester_owned_series_for_limits" category:"experimental"` @@ -236,7 +234,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { f.Float64Var(&cfg.ReadPathCPUUtilizationLimit, "ingester.read-path-cpu-utilization-limit", 0, "CPU utilization limit, as CPU cores, for CPU/memory utilization based read request limiting. Use 0 to disable it.") f.Uint64Var(&cfg.ReadPathMemoryUtilizationLimit, "ingester.read-path-memory-utilization-limit", 0, "Memory limit, in bytes, for CPU/memory utilization based read request limiting. 
Use 0 to disable it.") f.BoolVar(&cfg.LogUtilizationBasedLimiterCPUSamples, "ingester.log-utilization-based-limiter-cpu-samples", false, "Enable logging of utilization based limiter CPU samples.") - f.BoolVar(&cfg.LimitInflightRequestsUsingGrpcMethodLimiter, "ingester.limit-inflight-requests-using-grpc-method-limiter", true, "When enabled, in-flight write requests limit is checked as soon as the gRPC request is received, before the request is decoded and parsed.") f.Int64Var(&cfg.ErrorSampleRate, "ingester.error-sample-rate", 10, "Each error will be logged once in this many times. Use 0 to log all of them.") f.BoolVar(&cfg.UseIngesterOwnedSeriesForLimits, "ingester.use-ingester-owned-series-for-limits", false, "When enabled, only series currently owned by ingester according to the ring are used when checking user per-tenant series limit.") f.BoolVar(&cfg.UpdateIngesterOwnedSeries, "ingester.track-ingester-owned-series", false, "This option enables tracking of ingester-owned series based on ring state, even if -ingester.use-ingester-owned-series-for-limits is disabled.") @@ -1088,7 +1085,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques // Only start/finish request here when the request comes NOT from grpc handlers (i.e., from ingest.Store). // NOTE: request coming from grpc handler may end up calling start multiple times during its lifetime (e.g., when migrating to ingest storage). // startPushRequest handles this. 
- if i.cfg.IngestStorageConfig.Enabled || !i.cfg.LimitInflightRequestsUsingGrpcMethodLimiter { + if i.cfg.IngestStorageConfig.Enabled { reqSize := int64(req.Size()) var ( shouldFinish bool diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 737b0693af1..7b43b9af992 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -4612,50 +4612,39 @@ func TestIngester_LabelNames_ShouldNotCreateTSDBIfDoesNotExists(t *testing.T) { } func TestIngester_Push_ShouldNotCreateTSDBIngesterServiceIsNotInRunningState(t *testing.T) { - for _, grpcLimitEnabled := range []bool{false, true} { - t.Run(fmt.Sprintf("gRPC limit enabled: %t", grpcLimitEnabled), func(t *testing.T) { - cfg := defaultIngesterTestConfig(t) - cfg.LimitInflightRequestsUsingGrpcMethodLimiter = grpcLimitEnabled + cfg := defaultIngesterTestConfig(t) - // Configure the lifecycler to not immediately leave the ring, to make sure - // the ingester service will stay in Stopping state for longer. - cfg.IngesterRing.FinalSleep = 5 * time.Second + // Configure the lifecycler to not immediately leave the ring, to make sure + // the ingester service will stay in Stopping state for longer. + cfg.IngesterRing.FinalSleep = 5 * time.Second - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, nil) - require.NoError(t, err) + i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, nil) + require.NoError(t, err) - // Start the ingester and then stop it. - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - i.StopAsync() + // Start the ingester and then stop it. + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + i.StopAsync() - // Wait until the ingester service switches to Stopping state. - require.Eventually(t, func() bool { - return i.State() == services.Stopping - }, time.Second, 10*time.Millisecond) + // Wait until the ingester service switches to Stopping state. 
+ require.Eventually(t, func() bool { + return i.State() == services.Stopping + }, time.Second, 10*time.Millisecond) - // Mock request - userID := "test" - ctx := user.InjectOrgID(context.Background(), userID) - req, _, _, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, 0) - - var res *mimirpb.WriteResponse + // Mock request + userID := "test" + ctx := user.InjectOrgID(context.Background(), userID) + req, _, _, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, 0) - if grpcLimitEnabled { - res, err = pushWithSimulatedGRPCHandler(ctx, i, req) - } else { - res, err = i.Push(ctx, req) - } + res, err := pushWithSimulatedGRPCHandler(ctx, i, req) - assert.EqualError(t, err, newUnavailableError(services.Stopping).Error()) - assert.Nil(t, res) + assert.EqualError(t, err, newUnavailableError(services.Stopping).Error()) + assert.Nil(t, res) - // Check if the TSDB has been created - assert.Nil(t, i.getTSDB(userID)) + // Check if the TSDB has been created + assert.Nil(t, i.getTSDB(userID)) - // Wait until terminated. - require.NoError(t, i.AwaitTerminated(context.Background())) - }) - } + // Wait until terminated. 
+ require.NoError(t, i.AwaitTerminated(context.Background())) } func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { @@ -7883,87 +7872,75 @@ func TestIngester_PushInstanceLimits(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - for _, grpcLimiterEnabled := range []bool{false, true} { - t.Run(fmt.Sprintf("with gRPC limiter: %t", grpcLimiterEnabled), func(t *testing.T) { + // Create a mocked ingester + cfg := defaultIngesterTestConfig(t) + cfg.InstanceLimitsFn = func() *InstanceLimits { + return &testData.limits + } - // Create a mocked ingester - cfg := defaultIngesterTestConfig(t) - cfg.LimitInflightRequestsUsingGrpcMethodLimiter = grpcLimiterEnabled - cfg.InstanceLimitsFn = func() *InstanceLimits { - return &testData.limits - } + i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + // Wait until the ingester is healthy + test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + return i.lifecycler.HealthyInstancesCount() + }) - // Wait until the ingester is healthy - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { - return i.lifecycler.HealthyInstancesCount() - }) + // Iterate through users in sorted order (by username). + uids := []string{} + totalPushes := 0 + for uid, requests := range testData.reqs { + uids = append(uids, uid) + totalPushes += len(requests) + } + slices.Sort(uids) - // Iterate through users in sorted order (by username). 
- uids := []string{} - totalPushes := 0 - for uid, requests := range testData.reqs { - uids = append(uids, uid) - totalPushes += len(requests) - } - slices.Sort(uids) + pushIdx := 0 + for _, uid := range uids { + ctx := user.InjectOrgID(context.Background(), uid) - pushIdx := 0 - for _, uid := range uids { - ctx := user.InjectOrgID(context.Background(), uid) + for _, origReq := range testData.reqs[uid] { + pushIdx++ - for _, origReq := range testData.reqs[uid] { - pushIdx++ + // Clone the request so that it's safe to be sent multiple times. + reqData, marshalErr := origReq.Marshal() + require.NoError(t, marshalErr) + req := &mimirpb.WriteRequest{} + require.NoError(t, req.Unmarshal(reqData)) - // Clone the request so that it's safe to be sent multiple times. - reqData, marshalErr := origReq.Marshal() - require.NoError(t, marshalErr) - req := &mimirpb.WriteRequest{} - require.NoError(t, req.Unmarshal(reqData)) + // We simulate the sequence of calls done by the gRPC handler. + _, err := pushWithSimulatedGRPCHandler(ctx, i, req) - var err error + if pushIdx < totalPushes { + require.NoError(t, err) + } else { + // Last push may expect error. + if testData.expectedErr != nil { + assert.ErrorIs(t, err, testData.expectedErr) - // If gRPC limiter is enabled we simulate the sequence of calls done by the gRPC handler. - if grpcLimiterEnabled { - _, err = pushWithSimulatedGRPCHandler(ctx, i, req) - } else { - _, err = i.Push(ctx, req) + if testData.expectedOptionalLoggingErr { + var optional middleware.OptionalLogging + assert.ErrorAs(t, err, &optional) } - if pushIdx < totalPushes { - require.NoError(t, err) - } else { - // Last push may expect error. 
- if testData.expectedErr != nil { - assert.ErrorIs(t, err, testData.expectedErr) - - if testData.expectedOptionalLoggingErr { - var optional middleware.OptionalLogging - assert.ErrorAs(t, err, &optional) - } - - if testData.expectedGRPCErr { - s, ok := grpcutil.ErrorToStatus(err) - require.True(t, ok, "expected to be able to convert to gRPC status") - assert.Equal(t, codes.Unavailable, s.Code()) - } - } else { - assert.NoError(t, err) - } + if testData.expectedGRPCErr { + s, ok := grpcutil.ErrorToStatus(err) + require.True(t, ok, "expected to be able to convert to gRPC status") + assert.Equal(t, codes.Unavailable, s.Code()) } - - // imitate time ticking between each push - i.ingestionRate.Tick() - - rate := testutil.ToFloat64(i.metrics.ingestionRate) - require.NotZero(t, rate) + } else { + assert.NoError(t, err) } } - }) + + // imitate time ticking between each push + i.ingestionRate.Tick() + + rate := testutil.ToFloat64(i.metrics.ingestionRate) + require.NotZero(t, rate) + } } }) } @@ -8038,31 +8015,27 @@ func TestIngester_instanceLimitsMetrics(t *testing.T) { } func TestIngester_inflightPushRequests(t *testing.T) { - for _, grpcLimitEnabled := range []bool{false, true} { - t.Run(fmt.Sprintf("gRPC limit enabled: %t", grpcLimitEnabled), func(t *testing.T) { - limits := InstanceLimits{MaxInflightPushRequests: 1} + t.Run("with classic ingester", func(t *testing.T) { + limits := InstanceLimits{MaxInflightPushRequests: 1} - cfg := defaultIngesterTestConfig(t) - cfg.LimitInflightRequestsUsingGrpcMethodLimiter = grpcLimitEnabled - cfg.InstanceLimitsFn = func() *InstanceLimits { return &limits } + cfg := defaultIngesterTestConfig(t) + cfg.InstanceLimitsFn = func() *InstanceLimits { return &limits } - // Create a mocked ingester - reg := prometheus.NewPedanticRegistry() - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, reg) - require.NoError(t, err) + // Create a mocked ingester + reg := prometheus.NewPedanticRegistry() + i, err := 
prepareIngesterWithBlocksStorage(t, cfg, nil, reg) + require.NoError(t, err) - testIngesterInflightPushRequests(t, i, reg, grpcLimitEnabled) - }) - } + testIngesterInflightPushRequests(t, i, reg) + }) - t.Run("gRPC limit enabled with ingest storage enabled", func(t *testing.T) { + t.Run("with ingest storage enabled", func(t *testing.T) { limits := InstanceLimits{MaxInflightPushRequests: 1} overrides, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) cfg := defaultIngesterTestConfig(t) - cfg.LimitInflightRequestsUsingGrpcMethodLimiter = true cfg.InstanceLimitsFn = func() *InstanceLimits { return &limits } reg := prometheus.NewPedanticRegistry() @@ -8071,11 +8044,11 @@ func TestIngester_inflightPushRequests(t *testing.T) { // Re-enable push gRPC method to simulate migration period, when ingester can receive requests from gRPC i.cfg.PushGrpcMethodEnabled = true - testIngesterInflightPushRequests(t, i, reg, cfg.LimitInflightRequestsUsingGrpcMethodLimiter) + testIngesterInflightPushRequests(t, i, reg) }) } -func testIngesterInflightPushRequests(t *testing.T, i *Ingester, reg prometheus.Gatherer, grpcLimitEnabled bool) { +func testIngesterInflightPushRequests(t *testing.T, i *Ingester, reg prometheus.Gatherer) { require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) t.Cleanup(func() { services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -8099,14 +8072,7 @@ func testIngesterInflightPushRequests(t *testing.T, i *Ingester, reg prometheus. // Signal that we're going to do the real push now. close(startCh) - var err error - - if grpcLimitEnabled { - _, err = pushWithSimulatedGRPCHandler(ctx, i, req) - } else { - _, err = i.Push(ctx, req) - } - + _, err := pushWithSimulatedGRPCHandler(ctx, i, req) return err }) @@ -8124,23 +8090,8 @@ func testIngesterInflightPushRequests(t *testing.T, i *Ingester, reg prometheus. 
return i.inflightPushRequests.Load() }) - if grpcLimitEnabled { - _, err := pushWithSimulatedGRPCHandler(ctx, i, req) - require.ErrorIs(t, err, errMaxInflightRequestsReached) - } else { - _, err := i.Push(ctx, req) - require.ErrorIs(t, err, errMaxInflightRequestsReached) - - var optional middleware.OptionalLogging - require.ErrorAs(t, err, &optional) - - shouldLog, _ := optional.ShouldLog(ctx) - require.False(t, shouldLog, "expected not to log via .ShouldLog()") - - s, ok := grpcutil.ErrorToStatus(err) - require.True(t, ok, "expected to be able to convert to gRPC status") - require.Equal(t, codes.Unavailable, s.Code()) - } + _, err := pushWithSimulatedGRPCHandler(ctx, i, req) + require.ErrorIs(t, err, errMaxInflightRequestsReached) return nil }) @@ -8163,117 +8114,94 @@ func testIngesterInflightPushRequests(t *testing.T, i *Ingester, reg prometheus. } func TestIngester_inflightPushRequestsBytes(t *testing.T) { - for _, grpcLimitEnabled := range []bool{false, true} { - t.Run(fmt.Sprintf("gRPC limit enabled: %t", grpcLimitEnabled), func(t *testing.T) { - var limitsMx sync.Mutex - limits := InstanceLimits{MaxInflightPushRequestsBytes: 0} + var limitsMx sync.Mutex + limits := InstanceLimits{MaxInflightPushRequestsBytes: 0} - // Create a mocked ingester - cfg := defaultIngesterTestConfig(t) - cfg.LimitInflightRequestsUsingGrpcMethodLimiter = grpcLimitEnabled - cfg.InstanceLimitsFn = func() *InstanceLimits { - limitsMx.Lock() - defer limitsMx.Unlock() + // Create a mocked ingester + cfg := defaultIngesterTestConfig(t) + cfg.InstanceLimitsFn = func() *InstanceLimits { + limitsMx.Lock() + defer limitsMx.Unlock() - // Make a copy - il := limits - return &il - } + // Make a copy + il := limits + return &il + } - reg := prometheus.NewPedanticRegistry() - i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, reg) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - defer services.StopAndAwaitTerminated(context.Background(), 
i) //nolint:errcheck + reg := prometheus.NewPedanticRegistry() + i, err := prepareIngesterWithBlocksStorage(t, cfg, nil, reg) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck - // Wait until the ingester is healthy - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { - return i.lifecycler.HealthyInstancesCount() - }) + // Wait until the ingester is healthy + test.Poll(t, 100*time.Millisecond, 1, func() interface{} { + return i.lifecycler.HealthyInstancesCount() + }) - ctx := user.InjectOrgID(context.Background(), "test") + ctx := user.InjectOrgID(context.Background(), "test") - startCh := make(chan int) + startCh := make(chan int) - const targetRequestDuration = time.Second + const targetRequestDuration = time.Second - g, ctx := errgroup.WithContext(ctx) - g.Go(func() error { - req := prepareRequestForTargetRequestDuration(ctx, t, i, targetRequestDuration) + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + req := prepareRequestForTargetRequestDuration(ctx, t, i, targetRequestDuration) - // Update instance limits. Set limit to EXACTLY the request size. - limitsMx.Lock() - limits.MaxInflightPushRequestsBytes = int64(req.Size()) - limitsMx.Unlock() + // Update instance limits. Set limit to EXACTLY the request size. + limitsMx.Lock() + limits.MaxInflightPushRequestsBytes = int64(req.Size()) + limitsMx.Unlock() - // Signal that we're going to do the real push now. - startCh <- req.Size() - close(startCh) + // Signal that we're going to do the real push now. 
+ startCh <- req.Size() + close(startCh) - var err error - if grpcLimitEnabled { - _, err = pushWithSimulatedGRPCHandler(ctx, i, req) - } else { - _, err = i.Push(ctx, req) - } - return err - }) + _, err := pushWithSimulatedGRPCHandler(ctx, i, req) + return err + }) - g.Go(func() error { - req := generateSamplesForLabel(labels.FromStrings(labels.MetricName, "testcase1"), 1, 1024) + g.Go(func() error { + req := generateSamplesForLabel(labels.FromStrings(labels.MetricName, "testcase1"), 1, 1024) - var requestSize int - select { - case <-ctx.Done(): - // failed to setup - case requestSize = <-startCh: - // we can start the test. - } + var requestSize int + select { + case <-ctx.Done(): + // failed to setup + case requestSize = <-startCh: + // we can start the test. + } - test.Poll(t, targetRequestDuration/3, int64(1), func() interface{} { - return i.inflightPushRequests.Load() - }) + test.Poll(t, targetRequestDuration/3, int64(1), func() interface{} { + return i.inflightPushRequests.Load() + }) - require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(` + require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(fmt.Sprintf(` # HELP cortex_ingester_inflight_push_requests_bytes Total sum of inflight push request sizes in ingester in bytes. 
# TYPE cortex_ingester_inflight_push_requests_bytes gauge cortex_ingester_inflight_push_requests_bytes %d `, requestSize)), "cortex_ingester_inflight_push_requests_bytes")) - // Starting push request fails - _, err = i.StartPushRequest(ctx, 100) - require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) - - // Starting push request with unknown size fails - _, err = i.StartPushRequest(ctx, 0) - require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) - - // Sending push request fails - if grpcLimitEnabled { - _, err := pushWithSimulatedGRPCHandler(ctx, i, req) - require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) - } else { - _, err := i.Push(ctx, req) - require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) + // Starting push request fails + _, err = i.StartPushRequest(ctx, 100) + require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) - var optional middleware.OptionalLogging - require.ErrorAs(t, err, &optional) + // Starting push request with unknown size fails + _, err = i.StartPushRequest(ctx, 0) + require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) - shouldLog, _ := optional.ShouldLog(ctx) - require.False(t, shouldLog, "expected not to log via .ShouldLog()") - - s, ok := grpcutil.ErrorToStatus(err) - require.True(t, ok, "expected to be able to convert to gRPC status") - require.Equal(t, codes.Unavailable, s.Code()) - } + // Sending push request fails + _, err := pushWithSimulatedGRPCHandler(ctx, i, req) + require.ErrorIs(t, err, errMaxInflightRequestsBytesReached) - return nil - }) + return nil + }) - require.NoError(t, g.Wait()) + require.NoError(t, g.Wait()) - // Ensure the rejected request has been tracked in a metric. - require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` + // Ensure the rejected request has been tracked in a metric. 
+ require.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(` # HELP cortex_ingester_instance_rejected_requests_total Requests rejected for hitting per-instance limits # TYPE cortex_ingester_instance_rejected_requests_total counter cortex_ingester_instance_rejected_requests_total{reason="ingester_max_inflight_push_requests"} 0 @@ -8282,8 +8210,6 @@ func TestIngester_inflightPushRequestsBytes(t *testing.T) { cortex_ingester_instance_rejected_requests_total{reason="ingester_max_series"} 0 cortex_ingester_instance_rejected_requests_total{reason="ingester_max_tenants"} 0 `), "cortex_ingester_instance_rejected_requests_total")) - }) - } } func prepareRequestForTargetRequestDuration(ctx context.Context, t *testing.T, i *Ingester, targetRequestDuration time.Duration) *mimirpb.WriteRequest { diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 6f92103496c..d5d90b8648c 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -292,26 +292,20 @@ func (t *Mimir) initServer() (services.Service, error) { // t.Ingester or t.Distributor will be available. There's no race condition here, because gRPC server (service returned by this method, ie. initServer) // is started only after t.Ingester and t.Distributor are set in initIngester or initDistributorService. - var ingFn func() pushReceiver - if t.Cfg.Ingester.LimitInflightRequestsUsingGrpcMethodLimiter { - ingFn = func() pushReceiver { - // Return explicit nil, if there's no ingester. We don't want to return typed-nil as interface value. - if t.Ingester == nil { - return nil - } - return t.Ingester + ingFn := func() pushReceiver { + // Return explicit nil if there's no ingester. We don't want to return typed-nil as interface value. + if t.Ingester == nil { + return nil } + return t.Ingester } - var distFn func() pushReceiver - if t.Cfg.Distributor.LimitInflightRequestsUsingGrpcMethodLimiter { - distFn = func() pushReceiver { - // Return explicit nil, if there's no distributor. 
We don't want to return typed-nil as interface value. - if t.Distributor == nil { - return nil - } - return t.Distributor + distFn := func() pushReceiver { + // Return explicit nil if there's no distributor. We don't want to return typed-nil as interface value. + if t.Distributor == nil { + return nil } + return t.Distributor } // Installing this allows us to reject push requests received via gRPC early -- before they are fully read into memory. diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 795e6b9bb24..94bba4de56b 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -44,8 +44,6 @@ import ( type Config struct { // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. QueryStoreAfter time.Duration `yaml:"query_store_after" category:"advanced"` - // Deprecated in Mimir 2.12, remove in Mimir 2.14 - MaxQueryIntoFuture time.Duration `yaml:"max_query_into_future" category:"deprecated"` StoreGatewayClient ClientConfig `yaml:"store_gateway_client"` @@ -74,7 +72,6 @@ const ( func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.StoreGatewayClient.RegisterFlagsWithPrefix("querier.store-gateway-client", f) - f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.QueryStoreAfter, queryStoreAfterFlag, 12*time.Hour, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.BoolVar(&cfg.ShuffleShardingIngestersEnabled, "querier.shuffle-sharding-ingesters-enabled", true, fmt.Sprintf("Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -%s. 
If this setting is false or -%s is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", validation.QueryIngestersWithinFlag, validation.QueryIngestersWithinFlag)) f.StringVar(&cfg.PreferAvailabilityZone, "querier.prefer-availability-zone", "", "Preferred availability zone to query ingesters from when using the ingest storage.") @@ -218,15 +215,14 @@ func newQueryable( ) storage.Queryable { return storage.QueryableFunc(func(minT, maxT int64) (storage.Querier, error) { return multiQuerier{ - distributor: distributor, - blockStore: blockStore, - queryMetrics: queryMetrics, - cfg: cfg, - minT: minT, - maxT: maxT, - maxQueryIntoFuture: cfg.MaxQueryIntoFuture, - limits: limits, - logger: logger, + distributor: distributor, + blockStore: blockStore, + queryMetrics: queryMetrics, + cfg: cfg, + minT: minT, + maxT: maxT, + limits: limits, + logger: logger, }, nil }) @@ -240,8 +236,7 @@ type multiQuerier struct { cfg Config minT, maxT int64 - maxQueryIntoFuture time.Duration - limits *validation.Overrides + limits *validation.Overrides logger log.Logger } @@ -262,7 +257,7 @@ func (mq multiQuerier) getQueriers(ctx context.Context) (context.Context, []stor mq.queryMetrics, )) - mq.minT, mq.maxT, err = validateQueryTimeRange(tenantID, mq.minT, mq.maxT, now.UnixMilli(), mq.limits, mq.cfg.MaxQueryIntoFuture, spanlogger.FromContext(ctx, mq.logger)) + mq.minT, mq.maxT, err = validateQueryTimeRange(tenantID, mq.minT, mq.maxT, now.UnixMilli(), mq.limits, spanlogger.FromContext(ctx, mq.logger)) if err != nil { return nil, nil, err } @@ -332,7 +327,7 @@ func (mq multiQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHin // Validate query time range. Even if the time range has already been validated when we created // the querier, we need to check it again here because the time range specified in hints may be // different. 
- startMs, endMs, err := validateQueryTimeRange(userID, sp.Start, sp.End, now.UnixMilli(), mq.limits, mq.maxQueryIntoFuture, spanLog) + startMs, endMs, err := validateQueryTimeRange(userID, sp.Start, sp.End, now.UnixMilli(), mq.limits, spanLog) if errors.Is(err, errEmptyTimeRange) { return storage.NoopSeriesSet() } else if err != nil { @@ -586,9 +581,7 @@ func (s *sliceSeriesSet) Warnings() annotations.Annotations { return nil } -func validateQueryTimeRange(userID string, startMs, endMs, now int64, limits *validation.Overrides, maxQueryIntoFuture time.Duration, spanLog *spanlogger.SpanLogger) (int64, int64, error) { - endMs = clampMaxTime(spanLog, endMs, now, maxQueryIntoFuture, "max query into future") - +func validateQueryTimeRange(userID string, startMs, endMs, now int64, limits *validation.Overrides, spanLog *spanlogger.SpanLogger) (int64, int64, error) { maxQueryLookback := limits.MaxQueryLookback(userID) startMs = clampMinTime(spanLog, startMs, now, -maxQueryLookback, "max query lookback") diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 27aedb68b40..17667195e11 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -642,45 +642,34 @@ func TestQuerier_QueryIngestersWithinConfig(t *testing.T) { } } -func TestQuerier_ValidateQueryTimeRange_MaxQueryIntoFuture(t *testing.T) { +func TestQuerier_ValidateQueryTimeRange(t *testing.T) { const engineLookbackDelta = 5 * time.Minute now := time.Now() tests := map[string]struct { - maxQueryIntoFuture time.Duration - queryStartTime time.Time - queryEndTime time.Time - expectedSkipped bool - expectedStartTime time.Time - expectedEndTime time.Time + queryStartTime time.Time + queryEndTime time.Time + expectedStartTime time.Time + expectedEndTime time.Time }{ "should manipulate query if end time is after the limit": { - maxQueryIntoFuture: 10 * time.Minute, - queryStartTime: now.Add(-5 * time.Hour), - queryEndTime: now.Add(1 * time.Hour), - expectedStartTime: now.Add(-5 * 
time.Hour).Add(-engineLookbackDelta), - expectedEndTime: now.Add(10 * time.Minute), + queryStartTime: now.Add(-5 * time.Hour), + queryEndTime: now.Add(1 * time.Hour), + expectedStartTime: now.Add(-5 * time.Hour).Add(-engineLookbackDelta), + expectedEndTime: now.Add(1 * time.Hour), }, - "should not manipulate query if end time is far in the future but limit is disabled": { - maxQueryIntoFuture: 0, - queryStartTime: now.Add(-5 * time.Hour), - queryEndTime: now.Add(100 * time.Hour), - expectedStartTime: now.Add(-5 * time.Hour).Add(-engineLookbackDelta), - expectedEndTime: now.Add(100 * time.Hour), + "should not manipulate query if end time is far in the future": { + queryStartTime: now.Add(-5 * time.Hour), + queryEndTime: now.Add(100 * time.Hour), + expectedStartTime: now.Add(-5 * time.Hour).Add(-engineLookbackDelta), + expectedEndTime: now.Add(100 * time.Hour), }, - "should not manipulate query if end time is in the future but below the limit": { - maxQueryIntoFuture: 10 * time.Minute, - queryStartTime: now.Add(-100 * time.Minute), - queryEndTime: now.Add(5 * time.Minute), - expectedStartTime: now.Add(-100 * time.Minute).Add(-engineLookbackDelta), - expectedEndTime: now.Add(5 * time.Minute), - }, - "should skip executing a query outside the allowed time range": { - maxQueryIntoFuture: 10 * time.Minute, - queryStartTime: now.Add(50 * time.Minute), - queryEndTime: now.Add(60 * time.Minute), - expectedSkipped: true, + "should manipulate query if start time is far in the future": { + queryStartTime: now.Add(50 * time.Minute), + queryEndTime: now.Add(60 * time.Minute), + expectedStartTime: now.Add(50 * time.Minute).Add(-engineLookbackDelta), + expectedEndTime: now.Add(60 * time.Minute), }, } @@ -695,7 +684,6 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryIntoFuture(t *testing.T) { flagext.DefaultValues(&cfg) for name, c := range tests { - cfg.MaxQueryIntoFuture = c.maxQueryIntoFuture t.Run(name, func(t *testing.T) { // We don't need to query any data for this test, so 
an empty store is fine. distributor := &mockDistributor{} @@ -718,16 +706,11 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryIntoFuture(t *testing.T) { _, err = r.Matrix() require.Nil(t, err) - if !c.expectedSkipped { - // Assert on the time range of the actual executed query (5s delta). - delta := float64(5000) - require.Len(t, distributor.Calls, 1) - assert.InDelta(t, util.TimeToMillis(c.expectedStartTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) - assert.InDelta(t, util.TimeToMillis(c.expectedEndTime), int64(distributor.Calls[0].Arguments.Get(3).(model.Time)), delta) - } else { - // Ensure no query has been executed (because skipped). - assert.Len(t, distributor.Calls, 0) - } + // Assert on the time range of the actual executed query (5s delta). + delta := float64(5000) + require.Len(t, distributor.Calls, 1) + assert.InDelta(t, util.TimeToMillis(c.expectedStartTime), int64(distributor.Calls[0].Arguments.Get(2).(model.Time)), delta) + assert.InDelta(t, util.TimeToMillis(c.expectedEndTime), int64(distributor.Calls[0].Arguments.Get(3).(model.Time)), delta) }) } } @@ -1080,7 +1063,6 @@ func TestQuerier_ValidateQueryTimeRange_MaxLabelsQueryRange(t *testing.T) { var cfg Config flagext.DefaultValues(&cfg) - cfg.MaxQueryIntoFuture = 0 limits := defaultLimitsConfig() limits.MaxQueryLookback = model.Duration(thirtyDays * 2) From 6babbbc1b067bb882d947d3b8fb778b16136bf4d Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Thu, 26 Sep 2024 17:51:49 +0200 Subject: [PATCH 03/33] 2.14-rc.0: prepare version (#9427) * cut v2.14 changelog Signed-off-by: Vladimir Varankin * update version Signed-off-by: Vladimir Varankin --------- Signed-off-by: Vladimir Varankin --- CHANGELOG.md | 2 +- VERSION | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc795b5d2e6..66e7d72c88a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## main / unreleased +## v2.14.0-rc.0 ### Grafana Mimir 
diff --git a/VERSION b/VERSION index fb2c0766b7c..b84d379ff65 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.13.0 +2.14.0-rc.0 From c2d4a8584cfafcdcab23f6eff3ddf57a2a57b72f Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Fri, 27 Sep 2024 01:48:24 -0600 Subject: [PATCH 04/33] query-tee: fix behaviour of `-proxy.compare-skip-recent-samples` for long-running queries (#9416) (#9443) * query-tee: fix behaviour of `-proxy.compare-skip-recent-samples` for long-running queries * Add changelog entry (cherry picked from commit 3509c46e41f6a72b57b82285c522871680010658) Co-authored-by: Charles Korn --- CHANGELOG.md | 1 + tools/querytee/proxy_endpoint.go | 25 ++--- tools/querytee/proxy_endpoint_test.go | 2 +- tools/querytee/proxy_test.go | 2 +- tools/querytee/response_comparator.go | 103 ++++++++++++--------- tools/querytee/response_comparator_test.go | 15 +-- 6 files changed, 83 insertions(+), 65 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66e7d72c88a..2ccbc279f18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -218,6 +218,7 @@ * [ENHANCEMENT] Optionally consider equivalent error messages the same when comparing responses. Enabled by default, disable with `-proxy.require-exact-error-match=true`. #9143 #9350 * [BUGFIX] Ensure any errors encountered while forwarding a request to a backend (eg. DNS resolution failures) are logged. #8419 * [BUGFIX] The comparison of the results should not fail when either side contains extra samples from within SkipRecentSamples duration. #8920 +* [BUGFIX] When `-proxy.compare-skip-recent-samples` is enabled, compare sample timestamps with the time the query requests were made, rather than the time at which the comparison is occurring. 
#9416 ### Documentation diff --git a/tools/querytee/proxy_endpoint.go b/tools/querytee/proxy_endpoint.go index 51ddecb1740..f49f0374c96 100644 --- a/tools/querytee/proxy_endpoint.go +++ b/tools/querytee/proxy_endpoint.go @@ -24,7 +24,7 @@ import ( ) type ResponsesComparator interface { - Compare(expected, actual []byte) (ComparisonResult, error) + Compare(expected, actual []byte, queryEvaluationTime time.Time) (ComparisonResult, error) } type ProxyEndpoint struct { @@ -102,14 +102,15 @@ func (p *ProxyEndpoint) selectBackends() []ProxyBackendInterface { func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, backends []ProxyBackendInterface, resCh chan *backendResponse) { var ( - wg = sync.WaitGroup{} - err error - body []byte - responses = make([]*backendResponse, 0, len(backends)) - responsesMtx = sync.Mutex{} - timingMtx = sync.Mutex{} - query = req.URL.RawQuery - logger, ctx = spanlogger.NewWithLogger(req.Context(), p.logger, "Incoming proxied request") + wg = sync.WaitGroup{} + err error + body []byte + responses = make([]*backendResponse, 0, len(backends)) + responsesMtx = sync.Mutex{} + timingMtx = sync.Mutex{} + query = req.URL.RawQuery + logger, ctx = spanlogger.NewWithLogger(req.Context(), p.logger, "Incoming proxied request") + evaluationTime = time.Now() ) defer logger.Finish() @@ -258,7 +259,7 @@ func (p *ProxyEndpoint) executeBackendRequests(req *http.Request, backends []Pro expectedResponse, actualResponse = actualResponse, expectedResponse } - result, err := p.compareResponses(expectedResponse, actualResponse) + result, err := p.compareResponses(expectedResponse, actualResponse, evaluationTime) if result == ComparisonFailed { level.Error(logger).Log( "msg", "response comparison failed", @@ -314,7 +315,7 @@ func (p *ProxyEndpoint) waitBackendResponseForDownstream(resCh chan *backendResp return firstResponse } -func (p *ProxyEndpoint) compareResponses(expectedResponse, actualResponse *backendResponse) (ComparisonResult, error) { +func (p 
*ProxyEndpoint) compareResponses(expectedResponse, actualResponse *backendResponse, queryEvaluationTime time.Time) (ComparisonResult, error) { if expectedResponse.err != nil { return ComparisonFailed, fmt.Errorf("skipped comparison of response because the request to the preferred backend failed: %w", expectedResponse.err) } @@ -335,7 +336,7 @@ func (p *ProxyEndpoint) compareResponses(expectedResponse, actualResponse *backe return ComparisonSkipped, fmt.Errorf("skipped comparison of response because the response from the secondary backend contained an unexpected content type '%s', expected 'application/json'", actualResponse.contentType) } - return p.comparator.Compare(expectedResponse.body, actualResponse.body) + return p.comparator.Compare(expectedResponse.body, actualResponse.body, queryEvaluationTime) } type backendResponse struct { diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go index 86f59834ee9..2658f171d51 100644 --- a/tools/querytee/proxy_endpoint_test.go +++ b/tools/querytee/proxy_endpoint_test.go @@ -704,7 +704,7 @@ type mockComparator struct { comparisonError error } -func (m *mockComparator) Compare(_, _ []byte) (ComparisonResult, error) { +func (m *mockComparator) Compare(_, _ []byte, _ time.Time) (ComparisonResult, error) { return m.comparisonResult, m.comparisonError } diff --git a/tools/querytee/proxy_test.go b/tools/querytee/proxy_test.go index e0ead99df9a..85b04e121c8 100644 --- a/tools/querytee/proxy_test.go +++ b/tools/querytee/proxy_test.go @@ -47,7 +47,7 @@ var testRoutes = []Route{ type testComparator struct{} -func (testComparator) Compare(_, _ []byte) (ComparisonResult, error) { +func (testComparator) Compare(_, _ []byte, _ time.Time) (ComparisonResult, error) { return ComparisonSuccess, nil } diff --git a/tools/querytee/response_comparator.go b/tools/querytee/response_comparator.go index 302554d7155..d92026b359c 100644 --- a/tools/querytee/response_comparator.go +++ 
b/tools/querytee/response_comparator.go @@ -22,7 +22,7 @@ import ( ) // SamplesComparatorFunc helps with comparing different types of samples coming from /api/v1/query and /api/v1/query_range routes. -type SamplesComparatorFunc func(expected, actual json.RawMessage, opts SampleComparisonOptions) error +type SamplesComparatorFunc func(expected, actual json.RawMessage, queryEvaluationTime time.Time, opts SampleComparisonOptions) error type SamplesResponse struct { Status string @@ -64,7 +64,7 @@ func (s *SamplesComparator) RegisterSamplesType(samplesType string, comparator S s.sampleTypesComparator[samplesType] = comparator } -func (s *SamplesComparator) Compare(expectedResponse, actualResponse []byte) (ComparisonResult, error) { +func (s *SamplesComparator) Compare(expectedResponse, actualResponse []byte, queryEvaluationTime time.Time) (ComparisonResult, error) { var expected, actual SamplesResponse err := json.Unmarshal(expectedResponse, &expected) @@ -102,7 +102,7 @@ func (s *SamplesComparator) Compare(expectedResponse, actualResponse []byte) (Co return ComparisonFailed, fmt.Errorf("resultType %s not registered for comparison", expected.Data.ResultType) } - if err := comparator(expected.Data.Result, actual.Data.Result, s.opts); err != nil { + if err := comparator(expected.Data.Result, actual.Data.Result, queryEvaluationTime, s.opts); err != nil { return ComparisonFailed, err } @@ -201,7 +201,7 @@ func formatAnnotationsForErrorMessage(warnings []string) string { return "[" + strings.Join(formatted, ", ") + "]" } -func compareMatrix(expectedRaw, actualRaw json.RawMessage, opts SampleComparisonOptions) error { +func compareMatrix(expectedRaw, actualRaw json.RawMessage, queryEvaluationTime time.Time, opts SampleComparisonOptions) error { var expected, actual model.Matrix err := json.Unmarshal(expectedRaw, &expected) @@ -213,7 +213,7 @@ func compareMatrix(expectedRaw, actualRaw json.RawMessage, opts SampleComparison return err } - if 
allMatrixSamplesWithinRecentSampleWindow(expected, opts) && allMatrixSamplesWithinRecentSampleWindow(actual, opts) { + if allMatrixSamplesWithinRecentSampleWindow(expected, queryEvaluationTime, opts) && allMatrixSamplesWithinRecentSampleWindow(actual, queryEvaluationTime, opts) { return nil } @@ -233,7 +233,7 @@ func compareMatrix(expectedRaw, actualRaw json.RawMessage, opts SampleComparison } actualMetric := actual[actualMetricIndex] - err := compareMatrixSamples(expectedMetric, actualMetric, opts) + err := compareMatrixSamples(expectedMetric, actualMetric, queryEvaluationTime, opts) if err != nil { return fmt.Errorf("%w\nExpected result for series:\n%v\n\nActual result for series:\n%v", err, expectedMetric, actualMetric) } @@ -242,9 +242,9 @@ func compareMatrix(expectedRaw, actualRaw json.RawMessage, opts SampleComparison return nil } -func compareMatrixSamples(expected, actual *model.SampleStream, opts SampleComparisonOptions) error { +func compareMatrixSamples(expected, actual *model.SampleStream, queryEvaluationTime time.Time, opts SampleComparisonOptions) error { expectedSamplesTail, actualSamplesTail, err := comparePairs(expected.Values, actual.Values, func(p1 model.SamplePair, p2 model.SamplePair) error { - err := compareSamplePair(p1, p2, opts) + err := compareSamplePair(p1, p2, queryEvaluationTime, opts) if err != nil { return fmt.Errorf("float sample pair does not match for metric %s: %w", expected.Metric, err) } @@ -255,7 +255,7 @@ func compareMatrixSamples(expected, actual *model.SampleStream, opts SampleCompa } expectedHistogramSamplesTail, actualHistogramSamplesTail, err := comparePairs(expected.Histograms, actual.Histograms, func(p1 model.SampleHistogramPair, p2 model.SampleHistogramPair) error { - err := compareSampleHistogramPair(p1, p2, opts) + err := compareSampleHistogramPair(p1, p2, queryEvaluationTime, opts) if err != nil { return fmt.Errorf("histogram sample pair does not match for metric %s: %w", expected.Metric, err) } @@ -275,11 +275,11 
@@ func compareMatrixSamples(expected, actual *model.SampleStream, opts SampleCompa } skipAllRecentFloatSamples := canSkipAllSamples(func(p model.SamplePair) bool { - return time.Since(p.Timestamp.Time())-opts.SkipRecentSamples < 0 + return queryEvaluationTime.Sub(p.Timestamp.Time())-opts.SkipRecentSamples < 0 }, expectedSamplesTail, actualSamplesTail) skipAllRecentHistogramSamples := canSkipAllSamples(func(p model.SampleHistogramPair) bool { - return time.Since(p.Timestamp.Time())-opts.SkipRecentSamples < 0 + return queryEvaluationTime.Sub(p.Timestamp.Time())-opts.SkipRecentSamples < 0 }, expectedHistogramSamplesTail, actualHistogramSamplesTail) if skipAllRecentFloatSamples && skipAllRecentHistogramSamples { @@ -349,20 +349,20 @@ func canSkipAllSamples[S ~[]M, M any](skip func(M) bool, ss ...S) bool { return true } -func allMatrixSamplesWithinRecentSampleWindow(m model.Matrix, opts SampleComparisonOptions) bool { +func allMatrixSamplesWithinRecentSampleWindow(m model.Matrix, queryEvaluationTime time.Time, opts SampleComparisonOptions) bool { if opts.SkipRecentSamples == 0 { return false } for _, series := range m { for _, sample := range series.Values { - if time.Since(sample.Timestamp.Time()) > opts.SkipRecentSamples { + if queryEvaluationTime.Sub(sample.Timestamp.Time()) > opts.SkipRecentSamples { return false } } for _, sample := range series.Histograms { - if time.Since(sample.Timestamp.Time()) > opts.SkipRecentSamples { + if queryEvaluationTime.Sub(sample.Timestamp.Time()) > opts.SkipRecentSamples { return false } } @@ -371,7 +371,7 @@ func allMatrixSamplesWithinRecentSampleWindow(m model.Matrix, opts SampleCompari return true } -func compareVector(expectedRaw, actualRaw json.RawMessage, opts SampleComparisonOptions) error { +func compareVector(expectedRaw, actualRaw json.RawMessage, queryEvaluationTime time.Time, opts SampleComparisonOptions) error { var expected, actual model.Vector err := json.Unmarshal(expectedRaw, &expected) @@ -384,7 +384,7 @@ func 
compareVector(expectedRaw, actualRaw json.RawMessage, opts SampleComparison return err } - if allVectorSamplesWithinRecentSampleWindow(expected, opts) && allVectorSamplesWithinRecentSampleWindow(actual, opts) { + if allVectorSamplesWithinRecentSampleWindow(expected, queryEvaluationTime, opts) && allVectorSamplesWithinRecentSampleWindow(actual, queryEvaluationTime, opts) { return nil } @@ -406,13 +406,18 @@ func compareVector(expectedRaw, actualRaw json.RawMessage, opts SampleComparison actualMetric := actual[actualMetricIndex] if expectedMetric.Histogram == nil && actualMetric.Histogram == nil { - err := compareSamplePair(model.SamplePair{ - Timestamp: expectedMetric.Timestamp, - Value: expectedMetric.Value, - }, model.SamplePair{ - Timestamp: actualMetric.Timestamp, - Value: actualMetric.Value, - }, opts) + err := compareSamplePair( + model.SamplePair{ + Timestamp: expectedMetric.Timestamp, + Value: expectedMetric.Value, + }, + model.SamplePair{ + Timestamp: actualMetric.Timestamp, + Value: actualMetric.Value, + }, + queryEvaluationTime, + opts, + ) if err != nil { return fmt.Errorf("float sample pair does not match for metric %s: %w", expectedMetric.Metric, err) } @@ -421,13 +426,18 @@ func compareVector(expectedRaw, actualRaw json.RawMessage, opts SampleComparison } else if expectedMetric.Histogram == nil && actualMetric.Histogram != nil { return fmt.Errorf("sample pair does not match for metric %s: expected float value but got histogram", expectedMetric.Metric) } else { // Expected value is a histogram and the actual value is a histogram. 
- err := compareSampleHistogramPair(model.SampleHistogramPair{ - Timestamp: expectedMetric.Timestamp, - Histogram: expectedMetric.Histogram, - }, model.SampleHistogramPair{ - Timestamp: actualMetric.Timestamp, - Histogram: actualMetric.Histogram, - }, opts) + err := compareSampleHistogramPair( + model.SampleHistogramPair{ + Timestamp: expectedMetric.Timestamp, + Histogram: expectedMetric.Histogram, + }, + model.SampleHistogramPair{ + Timestamp: actualMetric.Timestamp, + Histogram: actualMetric.Histogram, + }, + queryEvaluationTime, + opts, + ) if err != nil { return fmt.Errorf("histogram sample pair does not match for metric %s: %w", expectedMetric.Metric, err) } @@ -437,13 +447,13 @@ func compareVector(expectedRaw, actualRaw json.RawMessage, opts SampleComparison return nil } -func allVectorSamplesWithinRecentSampleWindow(v model.Vector, opts SampleComparisonOptions) bool { +func allVectorSamplesWithinRecentSampleWindow(v model.Vector, queryEvaluationTime time.Time, opts SampleComparisonOptions) bool { if opts.SkipRecentSamples == 0 { return false } for _, sample := range v { - if time.Since(sample.Timestamp.Time()) > opts.SkipRecentSamples { + if queryEvaluationTime.Sub(sample.Timestamp.Time()) > opts.SkipRecentSamples { return false } } @@ -451,7 +461,7 @@ func allVectorSamplesWithinRecentSampleWindow(v model.Vector, opts SampleCompari return true } -func compareScalar(expectedRaw, actualRaw json.RawMessage, opts SampleComparisonOptions) error { +func compareScalar(expectedRaw, actualRaw json.RawMessage, queryEvaluationTime time.Time, opts SampleComparisonOptions) error { var expected, actual model.Scalar err := json.Unmarshal(expectedRaw, &expected) if err != nil { @@ -463,20 +473,25 @@ func compareScalar(expectedRaw, actualRaw json.RawMessage, opts SampleComparison return err } - return compareSamplePair(model.SamplePair{ - Timestamp: expected.Timestamp, - Value: expected.Value, - }, model.SamplePair{ - Timestamp: actual.Timestamp, - Value: actual.Value, - }, 
opts) + return compareSamplePair( + model.SamplePair{ + Timestamp: expected.Timestamp, + Value: expected.Value, + }, + model.SamplePair{ + Timestamp: actual.Timestamp, + Value: actual.Value, + }, + queryEvaluationTime, + opts, + ) } -func compareSamplePair(expected, actual model.SamplePair, opts SampleComparisonOptions) error { +func compareSamplePair(expected, actual model.SamplePair, queryEvaluationTime time.Time, opts SampleComparisonOptions) error { if expected.Timestamp != actual.Timestamp { return fmt.Errorf("expected timestamp %v but got %v", expected.Timestamp, actual.Timestamp) } - if opts.SkipRecentSamples > 0 && time.Since(expected.Timestamp.Time()) < opts.SkipRecentSamples { + if opts.SkipRecentSamples > 0 && queryEvaluationTime.Sub(expected.Timestamp.Time()) < opts.SkipRecentSamples { return nil } if !compareSampleValue(float64(expected.Value), float64(actual.Value), opts) { @@ -500,12 +515,12 @@ func compareSampleValue(first, second float64, opts SampleComparisonOptions) boo return math.Abs(first-second) <= opts.Tolerance } -func compareSampleHistogramPair(expected, actual model.SampleHistogramPair, opts SampleComparisonOptions) error { +func compareSampleHistogramPair(expected, actual model.SampleHistogramPair, queryEvaluationTime time.Time, opts SampleComparisonOptions) error { if expected.Timestamp != actual.Timestamp { return fmt.Errorf("expected timestamp %v but got %v", expected.Timestamp, actual.Timestamp) } - if opts.SkipRecentSamples > 0 && time.Since(expected.Timestamp.Time()) < opts.SkipRecentSamples { + if opts.SkipRecentSamples > 0 && queryEvaluationTime.Sub(expected.Timestamp.Time()) < opts.SkipRecentSamples { return nil } diff --git a/tools/querytee/response_comparator_test.go b/tools/querytee/response_comparator_test.go index 90419c3b556..4603c087a3b 100644 --- a/tools/querytee/response_comparator_test.go +++ b/tools/querytee/response_comparator_test.go @@ -699,7 +699,7 @@ Count: 2.000000, Sum: 3.000000, Buckets: [[0,2):2] @[1]`, }, } 
{ t.Run(tc.name, func(t *testing.T) { - err := compareMatrix(tc.expected, tc.actual, SampleComparisonOptions{}) + err := compareMatrix(tc.expected, tc.actual, time.Now(), SampleComparisonOptions{}) if tc.err == "" { require.NoError(t, err) return @@ -1078,7 +1078,7 @@ func TestCompareVector(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - err := compareVector(tc.expected, tc.actual, SampleComparisonOptions{}) + err := compareVector(tc.expected, tc.actual, time.Now(), SampleComparisonOptions{}) if tc.err == nil { require.NoError(t, err) return @@ -1115,7 +1115,7 @@ func TestCompareScalar(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - err := compareScalar(tc.expected, tc.actual, SampleComparisonOptions{}) + err := compareScalar(tc.expected, tc.actual, time.Now(), SampleComparisonOptions{}) if tc.err == nil { require.NoError(t, err) return @@ -1127,8 +1127,9 @@ func TestCompareScalar(t *testing.T) { } func TestCompareSamplesResponse(t *testing.T) { - now := model.Now().String() - overAnHourAgo := model.Now().Add(-61 * time.Minute).String() + nowT := model.TimeFromUnixNano(time.Date(2099, 1, 2, 3, 4, 5, 6, time.UTC).UnixNano()) + now := nowT.String() + overAnHourAgo := nowT.Add(-61 * time.Minute).String() for _, tc := range []struct { name string @@ -2172,7 +2173,7 @@ func TestCompareSamplesResponse(t *testing.T) { UseRelativeError: tc.useRelativeError, SkipRecentSamples: tc.skipRecentSamples, }) - result, err := samplesComparator.Compare(tc.expected, tc.actual) + result, err := samplesComparator.Compare(tc.expected, tc.actual, nowT.Time()) if tc.err == nil { require.NoError(t, err) require.Equal(t, ComparisonSuccess, result) @@ -2342,7 +2343,7 @@ func TestCompareSampleHistogramPair(t *testing.T) { field.mutator(expected.Histogram, model.FloatString(testCase.expected)) field.mutator(actual.Histogram, model.FloatString(testCase.actual)) - err := compareSampleHistogramPair(expected, actual, opts) + err := compareSampleHistogramPair(expected, 
actual, time.Now(), opts) if testCase.shouldFail { expectedError := field.expectedErrorMessageGenerator(&expected, &actual) From 3a78739e7042a0cc84eaccdfe8eeb17cc007578c Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Fri, 27 Sep 2024 02:03:48 -0600 Subject: [PATCH 05/33] query-tee: add equivalent errors for string expression for range queries (#9366) (#9444) * query-tee: add equivalent errors for string expression for range queries * Add changelog entry (cherry picked from commit 47aae6ac43e09c756b036f35ded76844fbfec735) Co-authored-by: Charles Korn --- CHANGELOG.md | 2 +- tools/querytee/response_comparator.go | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ccbc279f18..6cf6774e95b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -215,7 +215,7 @@ * [ENHANCEMENT] Don't consider responses to be different during response comparison if both backends' responses contain different series, but all samples are within the recent sample window. #8749 #8894 * [ENHANCEMENT] When the expected and actual response for a matrix series is different, the full set of samples for that series from both backends will now be logged. #8947 * [ENHANCEMENT] Wait up to `-server.graceful-shutdown-timeout` for inflight requests to finish when shutting down, rather than immediately terminating inflight requests on shutdown. #8985 -* [ENHANCEMENT] Optionally consider equivalent error messages the same when comparing responses. Enabled by default, disable with `-proxy.require-exact-error-match=true`. #9143 #9350 +* [ENHANCEMENT] Optionally consider equivalent error messages the same when comparing responses. Enabled by default, disable with `-proxy.require-exact-error-match=true`. #9143 #9350 #9366 * [BUGFIX] Ensure any errors encountered while forwarding a request to a backend (eg. DNS resolution failures) are logged. 
#8419 * [BUGFIX] The comparison of the results should not fail when either side contains extra samples from within SkipRecentSamples duration. #8920 * [BUGFIX] When `-proxy.compare-skip-recent-samples` is enabled, compare sample timestamps with the time the query requests were made, rather than the time at which the comparison is occurring. #9416 diff --git a/tools/querytee/response_comparator.go b/tools/querytee/response_comparator.go index d92026b359c..f992a279fe8 100644 --- a/tools/querytee/response_comparator.go +++ b/tools/querytee/response_comparator.go @@ -120,12 +120,19 @@ func (s *SamplesComparator) Compare(expectedResponse, actualResponse []byte, que var errorEquivalenceClasses = [][]*regexp.Regexp{ { - // Invalid expression type for range query: MQE and Prometheus' engine return different error messages. + // Range vector expression for range query: MQE and Prometheus' engine return different error messages. // Prometheus' engine: regexp.MustCompile(`invalid parameter "query": invalid expression type "range vector" for range query, must be Scalar or instant Vector`), // MQE: regexp.MustCompile(`invalid parameter "query": query expression produces a range vector, but expression for range queries must produce an instant vector or scalar`), }, + { + // String expression for range query: MQE and Prometheus' engine return different error messages. + // Prometheus' engine: + regexp.MustCompile(`invalid parameter "query": invalid expression type "string" for range query, must be Scalar or instant Vector`), + // MQE: + regexp.MustCompile(`invalid parameter "query": query expression produces a string, but expression for range queries must produce an instant vector or scalar`), + }, { // Binary operation conflict on right (one-to-one) / many (one-to-many/many-to-one) side: MQE and Prometheus' engine return different error messages, and there's no guarantee they'll pick the same series as examples. 
// Even comparing Prometheus' engine to another instance of Prometheus' engine can produce different results: the series selected as examples are not deterministic. From 743504993cef93cac3389963dd2e9b03c5599a40 Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Fri, 27 Sep 2024 02:13:26 -0600 Subject: [PATCH 06/33] Update rollout-operator to v0.19.1. (#9388) (#9442) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update rollout-operator to v0.19.1. Signed-off-by: Peter Štibraný * CHANGELOG.md, helm Signed-off-by: Peter Štibraný * Update helm. Signed-off-by: Peter Štibraný --------- Signed-off-by: Peter Štibraný (cherry picked from commit f3ad73ac1af2e733e326f7310c6cdef1c5ae10c5) Co-authored-by: Peter Štibraný --- CHANGELOG.md | 1 + operations/helm/charts/mimir-distributed/CHANGELOG.md | 1 + operations/helm/charts/mimir-distributed/Chart.lock | 6 +++--- operations/helm/charts/mimir-distributed/Chart.yaml | 2 +- operations/helm/charts/mimir-distributed/README.md | 2 +- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 
++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../charts/rollout_operator/templates/deployment.yaml | 6 +++--- .../charts/rollout_operator/templates/serviceaccount.yaml | 4 ++-- .../mimir-tests/test-automated-downscale-generated.yaml | 2 +- .../mimir-tests/test-automated-downscale-v2-generated.yaml | 2 +- 
.../test-compactor-concurrent-rollout-generated.yaml | 2 +- ...oncurrent-rollout-max-unavailable-percent-generated.yaml | 2 +- .../mimir-tests/test-consul-multi-zone-generated.yaml | 2 +- .../test-deployment-mode-migration-generated.yaml | 2 +- .../test-ingest-storage-migration-step-0-generated.yaml | 2 +- .../test-ingest-storage-migration-step-1-generated.yaml | 2 +- .../test-ingest-storage-migration-step-10-generated.yaml | 2 +- .../test-ingest-storage-migration-step-11-generated.yaml | 2 +- .../test-ingest-storage-migration-step-2-generated.yaml | 2 +- .../test-ingest-storage-migration-step-3-generated.yaml | 2 +- .../test-ingest-storage-migration-step-4-generated.yaml | 2 +- .../test-ingest-storage-migration-step-5a-generated.yaml | 2 +- .../test-ingest-storage-migration-step-5b-generated.yaml | 2 +- .../test-ingest-storage-migration-step-6-generated.yaml | 2 +- .../test-ingest-storage-migration-step-7-generated.yaml | 2 +- .../test-ingest-storage-migration-step-8-generated.yaml | 2 +- .../test-ingest-storage-migration-step-9-generated.yaml | 2 +- .../test-ingest-storage-migration-step-final-generated.yaml | 2 +- operations/mimir-tests/test-multi-zone-generated.yaml | 2 +- .../test-multi-zone-with-ongoing-migration-generated.yaml | 2 +- ...-with-store-gateway-automated-downscaling-generated.yaml | 2 +- .../mimir-tests/test-pvc-auto-deletion-generated.yaml | 2 +- ...-read-write-deployment-mode-s3-autoscaled-generated.yaml | 2 +- ...-write-deployment-mode-s3-caches-disabled-generated.yaml | 2 +- .../test-read-write-deployment-mode-s3-generated.yaml | 2 +- operations/mimir/images.libsonnet | 2 +- 75 files changed, 140 insertions(+), 138 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6cf6774e95b..649a64e2c7d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -177,6 +177,7 @@ * [ENHANCEMENT] Allow to override Kafka client ID on a per-component basis. 
#9026 * [ENHANCEMENT] Rollout-operator's access to ReplicaTemplate is now configured via config option `rollout_operator_replica_template_access_enabled`. #9252 * [ENHANCEMENT] Added support for new way of downscaling ingesters, using rollout-operator's resource-mirroring feature and read-only mode of ingesters. This can be enabled by using `ingester_automated_downscale_v2_enabled` config option. This is mutually exclusive with both `ingester_automated_downscale_enabled` (previous downscale mode) and `ingest_storage_ingester_autoscaling_enabled` (autoscaling for ingest-storage). +* [ENHANCEMENT] Update rollout-operator to `v0.19.1`. #9388 * [BUGFIX] Added missing node affinity matchers to write component. #8910 ### Mimirtool diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index 5d1254d5cfc..74f391be7f7 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -48,6 +48,7 @@ Entries should include a reference to the Pull Request that introduced the chang * [ENHANCEMENT] Add support for running continuous-test with GEM #8837 * [ENHANCEMENT] Alerts: `RequestErrors` and `RulerRemoteEvaluationFailing` have been enriched with a native histogram version. #9004 * [ENHANCEMENT] Ingester: set GOMAXPROCS to help with golang scheduling overhead when running on machines with a lot of cores. #9283 +* [ENHANCEMENT] Update rollout-operator to `v0.19.1` (Helm chart version `v0.18.0`). #9388 * [BUGFIX] Add missing container security context to run `continuous-test` under the restricted security policy. #8653 * [BUGFIX] Add `global.extraVolumeMounts` to the exporter container on memcached statefulsets #8787 * [BUGFIX] Fix helm releases failing when `querier.kedaAutoscaling.predictiveScalingEnabled=true`. 
#8731 diff --git a/operations/helm/charts/mimir-distributed/Chart.lock b/operations/helm/charts/mimir-distributed/Chart.lock index 3260988e681..04f63105e34 100644 --- a/operations/helm/charts/mimir-distributed/Chart.lock +++ b/operations/helm/charts/mimir-distributed/Chart.lock @@ -7,6 +7,6 @@ dependencies: version: 0.4.2 - name: rollout-operator repository: https://grafana.github.io/helm-charts - version: 0.17.0 -digest: sha256:8ff7d04e35b065734914d3cc8a1234667d4b1a79d7e992e032b4225d5b1beba6 -generated: "2024-09-16T03:04:33.982457274Z" + version: 0.18.0 +digest: sha256:ab34721ce3b0a3ace5cab4ecdb9eeafed4435491230fce407df3c742f1683b23 +generated: "2024-09-24T09:59:54.865425961Z" diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml index 40c8d36460a..4ee5e701abd 100644 --- a/operations/helm/charts/mimir-distributed/Chart.yaml +++ b/operations/helm/charts/mimir-distributed/Chart.yaml @@ -20,5 +20,5 @@ dependencies: - name: rollout-operator alias: rollout_operator repository: https://grafana.github.io/helm-charts - version: 0.17.0 + version: 0.18.0 condition: rollout_operator.enabled diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index 71ebc96ffc9..c64ed8cedb9 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -26,7 +26,7 @@ Kubernetes: `^1.20.0-0` |------------|------|---------| | https://charts.min.io/ | minio(minio) | 5.2.0 | | https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.4.2 | -| https://grafana.github.io/helm-charts | rollout_operator(rollout-operator) | 0.17.0 | +| https://grafana.github.io/helm-charts | rollout_operator(rollout-operator) | 0.18.0 | # Contributing and releasing diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml 
b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index e01d1997858..94baefcca68 100644 --- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: enterprise-https-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: enterprise-https-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index bd33219c037..edc96bf977f 100644 --- a/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/enterprise-https-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: enterprise-https-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: enterprise-https-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" 
app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index fc137c1b0df..c86291ea551 100644 --- a/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: gateway-enterprise-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: gateway-enterprise-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 17770ab84a5..2a6fed195be 100644 --- a/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/gateway-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: gateway-enterprise-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: 
rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: gateway-enterprise-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 1c0fad41467..4ac95298ffc 100644 --- a/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: gateway-nginx-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: gateway-nginx-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 4b6d4bf0c9f..8e059db2f64 100644 --- a/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/gateway-nginx-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: 
ServiceAccount metadata: name: gateway-nginx-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: gateway-nginx-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 84c2c246a31..863f5c646b1 100644 --- a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: keda-autoscaling-global-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: keda-autoscaling-global-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 43b382b6266..351675a0a73 100644 --- 
a/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/keda-autoscaling-global-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: keda-autoscaling-global-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: keda-autoscaling-global-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 1b8c43caaf7..447e69f1571 100644 --- a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: keda-autoscaling-metamonitoring-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git 
a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 0c5944d04f9..2d49814a0ad 100644 --- a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: keda-autoscaling-metamonitoring-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 83e92f81643..34f159be03b 100644 --- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: keda-autoscaling-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: keda-autoscaling-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm 
spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 574c3f86ae1..acca7830f5c 100644 --- a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: keda-autoscaling-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: keda-autoscaling-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 8ad7436c0ef..db276589684 100644 --- a/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: large-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: large-values - 
app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 5a8fdf8e8dd..6f1a6389102 100644 --- a/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/large-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: large-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: large-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 65fa781b30f..b61dfa800b8 100644 --- a/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: openshift-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: 
rollout-operator app.kubernetes.io/instance: openshift-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -40,7 +40,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 5f372ec7871..53262add652 100644 --- a/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/openshift-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: openshift-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: openshift-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index c3c9410003d..f8bbb848ffc 100644 --- a/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: scheduler-name-values-rollout-operator labels: - 
helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: scheduler-name-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 62be5fd7246..e064379cb37 100644 --- a/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/scheduler-name-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: scheduler-name-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: scheduler-name-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 6e031411cdd..d76fed12c49 100644 --- a/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ 
-5,10 +5,10 @@ kind: Deployment metadata: name: small-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: small-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 4c36a49502b..6b738a87862 100644 --- a/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/small-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: small-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: small-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 48e70ebc7e4..f8f7719e1e8 100644 --- a/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ 
b/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-enterprise-k8s-1.25-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-enterprise-k8s-1.25-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 19fbe963096..948a566593a 100644 --- a/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-enterprise-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-enterprise-k8s-1.25-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-enterprise-k8s-1.25-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml 
b/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 75cb9e88bc0..63454be41d9 100644 --- a/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-enterprise-legacy-label-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-enterprise-legacy-label-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index e97ac23fd73..891955a7b40 100644 --- a/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-enterprise-legacy-label-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-enterprise-legacy-label-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator 
app.kubernetes.io/instance: test-enterprise-legacy-label-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 4e3d3d38b3e..6c740d6f75e 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-enterprise-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-enterprise-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 3e729aa5975..b988a351029 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: 
test-enterprise-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-enterprise-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 89c994e9315..2183798f8df 100644 --- a/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-ingress-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-ingress-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index b575e317cf4..c875252c05c 100644 --- a/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ 
b/operations/helm/tests/test-ingress-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-ingress-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-ingress-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 9cc909090d4..13ebe5b648f 100644 --- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-oss-k8s-1.25-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-k8s-1.25-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 
f60a0648b5a..508c78d2c4f 100644 --- a/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-oss-k8s-1.25-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-oss-k8s-1.25-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-k8s-1.25-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 88432afc789..4a1a22a6365 100644 --- a/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-oss-logical-multizone-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-logical-multizone-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git 
a/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index b6383b69da4..e93158338e6 100644 --- a/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-oss-logical-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-oss-logical-multizone-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-logical-multizone-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index ed18396139d..80c3d3fc706 100644 --- a/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-oss-multizone-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-multizone-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ 
-43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 93efb36cf5e..a6b70fef835 100644 --- a/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-oss-multizone-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-oss-multizone-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-multizone-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 2ac5bba09aa..cb48dd46a5a 100644 --- a/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-oss-topology-spread-constraints-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 
+ helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-topology-spread-constraints-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 4d83d168ece..bef113f9699 100644 --- a/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-oss-topology-spread-constraints-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-oss-topology-spread-constraints-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-oss-topology-spread-constraints-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index d09a508763a..a5b30f7ba27 100644 --- 
a/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-requests-and-limits-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-requests-and-limits-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index 9251e151dae..662cc6730f0 100644 --- a/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-requests-and-limits-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-requests-and-limits-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-requests-and-limits-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git 
a/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index df3c706861a..b16d32798a9 100644 --- a/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-ruler-dedicated-query-path-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-ruler-dedicated-query-path-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index d4618c508a8..6b18667b79b 100644 --- a/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-ruler-dedicated-query-path-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: 
test-ruler-dedicated-query-path-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-ruler-dedicated-query-path-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml index 1862c9387f1..3172a961057 100644 --- a/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml +++ b/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -5,10 +5,10 @@ kind: Deployment metadata: name: test-vault-agent-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-vault-agent-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -43,7 +43,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true - image: "grafana/rollout-operator:v0.18.0" + image: "grafana/rollout-operator:v0.19.1" imagePullPolicy: IfNotPresent args: - -kubernetes.namespace=citestns diff --git a/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml index bffbf96ec60..f073902a145 100644 --- a/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml 
+++ b/operations/helm/tests/test-vault-agent-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -5,8 +5,8 @@ kind: ServiceAccount metadata: name: test-vault-agent-values-rollout-operator labels: - helm.sh/chart: rollout-operator-0.17.0 + helm.sh/chart: rollout-operator-0.18.0 app.kubernetes.io/name: rollout-operator app.kubernetes.io/instance: test-vault-agent-values - app.kubernetes.io/version: "v0.18.0" + app.kubernetes.io/version: "v0.19.1" app.kubernetes.io/managed-by: Helm diff --git a/operations/mimir-tests/test-automated-downscale-generated.yaml b/operations/mimir-tests/test-automated-downscale-generated.yaml index f6f7f23911b..867119fe176 100644 --- a/operations/mimir-tests/test-automated-downscale-generated.yaml +++ b/operations/mimir-tests/test-automated-downscale-generated.yaml @@ -1016,7 +1016,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-automated-downscale-v2-generated.yaml b/operations/mimir-tests/test-automated-downscale-v2-generated.yaml index 2ac5bfdced0..8a753e43a71 100644 --- a/operations/mimir-tests/test-automated-downscale-v2-generated.yaml +++ b/operations/mimir-tests/test-automated-downscale-v2-generated.yaml @@ -1075,7 +1075,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml b/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml index 67a2832155e..5641a0ef763 100644 --- a/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml +++ b/operations/mimir-tests/test-compactor-concurrent-rollout-generated.yaml @@ -836,7 +836,7 @@ spec: 
containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-compactor-concurrent-rollout-max-unavailable-percent-generated.yaml b/operations/mimir-tests/test-compactor-concurrent-rollout-max-unavailable-percent-generated.yaml index 558d2a7beaf..6ac058b320b 100644 --- a/operations/mimir-tests/test-compactor-concurrent-rollout-max-unavailable-percent-generated.yaml +++ b/operations/mimir-tests/test-compactor-concurrent-rollout-max-unavailable-percent-generated.yaml @@ -836,7 +836,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-consul-multi-zone-generated.yaml b/operations/mimir-tests/test-consul-multi-zone-generated.yaml index 3467c46860b..f3deaa54587 100644 --- a/operations/mimir-tests/test-consul-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-consul-multi-zone-generated.yaml @@ -1387,7 +1387,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-deployment-mode-migration-generated.yaml b/operations/mimir-tests/test-deployment-mode-migration-generated.yaml index 5a7450f20c9..b087ef91f4a 100644 --- a/operations/mimir-tests/test-deployment-mode-migration-generated.yaml +++ b/operations/mimir-tests/test-deployment-mode-migration-generated.yaml @@ -1417,7 +1417,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git 
a/operations/mimir-tests/test-ingest-storage-migration-step-0-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-0-generated.yaml index 8fdf1862b0c..b86c1fd5032 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-0-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-0-generated.yaml @@ -1138,7 +1138,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml index 25acba7d4e7..705760da7a2 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml @@ -1209,7 +1209,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-10-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-10-generated.yaml index c5a8a6c1502..5493a610ba9 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-10-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-10-generated.yaml @@ -1197,7 +1197,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-11-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-11-generated.yaml index 6a2281b1887..04b7c5145cf 100644 --- 
a/operations/mimir-tests/test-ingest-storage-migration-step-11-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-11-generated.yaml @@ -1197,7 +1197,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml index d1c28493ffc..f1dd8905fe4 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml @@ -1216,7 +1216,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml index 9c6afb1e5f0..8c66ea46ecc 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml @@ -1227,7 +1227,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml index 8869af4400b..fa331a58566 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml @@ -1226,7 +1226,7 @@ spec: containers: - args: - 
-kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml index 77309e5afec..9ff16c6f497 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml @@ -1226,7 +1226,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml index 6b4beadec05..71a1767b8ee 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml @@ -1226,7 +1226,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml index 889534d8b1e..533e044db21 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml @@ -1157,7 +1157,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git 
a/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml index 3a8a2d4f3d5..a56bf98d767 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml @@ -1161,7 +1161,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml index fd2cd8ce59a..3994bf7bb43 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml @@ -1161,7 +1161,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-9-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-9-generated.yaml index 8da0170c033..8028222f189 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-9-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-9-generated.yaml @@ -1138,7 +1138,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-final-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-final-generated.yaml index c134826535f..c65472b3c02 100644 --- 
a/operations/mimir-tests/test-ingest-storage-migration-step-final-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-final-generated.yaml @@ -1220,7 +1220,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-multi-zone-generated.yaml b/operations/mimir-tests/test-multi-zone-generated.yaml index 0413a38933a..40cba8a70a7 100644 --- a/operations/mimir-tests/test-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-generated.yaml @@ -1016,7 +1016,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml index b22c0cfc694..a68c3ae1259 100644 --- a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml @@ -1084,7 +1084,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml b/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml index 1f7d03c4d02..e621e14d280 100644 --- a/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-with-store-gateway-automated-downscaling-generated.yaml @@ -1094,7 +1094,7 @@ spec: - args: - 
--server-tls.enabled=true - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-pvc-auto-deletion-generated.yaml b/operations/mimir-tests/test-pvc-auto-deletion-generated.yaml index d8a051a4057..a140f94d359 100644 --- a/operations/mimir-tests/test-pvc-auto-deletion-generated.yaml +++ b/operations/mimir-tests/test-pvc-auto-deletion-generated.yaml @@ -950,7 +950,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml b/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml index f36d6b77929..271247c09ff 100644 --- a/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml +++ b/operations/mimir-tests/test-read-write-deployment-mode-s3-autoscaled-generated.yaml @@ -619,7 +619,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml b/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml index fa8b41412ed..eb4eb06afa3 100644 --- a/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml +++ b/operations/mimir-tests/test-read-write-deployment-mode-s3-caches-disabled-generated.yaml @@ -482,7 +482,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: 
diff --git a/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml b/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml index 5bc102d3e8e..d112c825120 100644 --- a/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml +++ b/operations/mimir-tests/test-read-write-deployment-mode-s3-generated.yaml @@ -620,7 +620,7 @@ spec: containers: - args: - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.17.0 + image: grafana/rollout-operator:v0.19.1 imagePullPolicy: IfNotPresent name: rollout-operator ports: diff --git a/operations/mimir/images.libsonnet b/operations/mimir/images.libsonnet index 57925e1ae81..0efda6305c0 100644 --- a/operations/mimir/images.libsonnet +++ b/operations/mimir/images.libsonnet @@ -28,6 +28,6 @@ mimir_backend: self.mimir, // See: https://github.com/grafana/rollout-operator - rollout_operator: 'grafana/rollout-operator:v0.17.0', + rollout_operator: 'grafana/rollout-operator:v0.19.1', }, } From 3e2d1e05099eddfc4a59af0e83cd1a347803ad03 Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Fri, 27 Sep 2024 11:54:26 +0200 Subject: [PATCH 07/33] typo in the changelog title (#9446) Signed-off-by: Vladimir Varankin --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 649a64e2c7d..a3ab2b3e6ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # Changelog -## v2.14.0-rc.0 +## 2.14.0-rc.0 ### Grafana Mimir From 4c15f8b4eb864852ea35186d0bc431fe7fcc3caf Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Mon, 30 Sep 2024 04:33:09 -0600 Subject: [PATCH 08/33] 2.14 release notes (#9406) (#9479) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * draft 2.14 release notes Signed-off-by: Vladimir Varankin * Apply suggestions from code review Co-authored-by: Taylor C <41653732+tacole02@users.noreply.github.com> * update 
release-notes Signed-off-by: Vladimir Varankin * note on deprecating redis Signed-off-by: Vladimir Varankin * Apply suggestions from code review Co-authored-by: Peter Štibraný --------- Signed-off-by: Vladimir Varankin Co-authored-by: Taylor C <41653732+tacole02@users.noreply.github.com> Co-authored-by: Peter Štibraný (cherry picked from commit b1cce377b8e682886e8952fe6228eed0f20151b1) Co-authored-by: Vladimir Varankin --- docs/sources/mimir/release-notes/v2.14.md | 113 ++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 docs/sources/mimir/release-notes/v2.14.md diff --git a/docs/sources/mimir/release-notes/v2.14.md b/docs/sources/mimir/release-notes/v2.14.md new file mode 100644 index 00000000000..9e9d34dee1f --- /dev/null +++ b/docs/sources/mimir/release-notes/v2.14.md @@ -0,0 +1,113 @@ +--- +title: "Grafana Mimir version 2.14 release notes" +menuTitle: "V2.14 release notes" +description: "Release notes for Grafana Mimir version 2.14" +weight: 1100 +--- + +# Grafana Mimir version 2.14 release notes + + + + + +Grafana Labs is excited to announce version 2.14 of Grafana Mimir. + +The highlights that follow include the top features, enhancements, and bug fixes in this release. +For the complete list of changes, refer to the [CHANGELOG](https://github.com/grafana/mimir/blob/main/CHANGELOG.md). + +## Features and enhancements + +The streaming of chunks from store-gateways to queriers is now enabled by default. +This reduces the memory usage in queriers. +This was an experimental feature since Mimir 2.10, and is now considered stable. + +Compactor adds a new `cortex_compactor_disk_out_of_space_errors_total` counter metric that tracks how many times a compaction fails +due to the compactor being out of disk. + +The distributor now replies with the `Retry-After` header on retryable errors by default. +This protects Mimir from clients, including Prometheus, that default to retrying very quickly, making recovering from an outage easier. 
+The feature was originally added as experimental in Mimir 2.11. + +Incoming OTLP requests were previously size-limited with the distributor's `-distributor.max-recv-msg-size` configuration. +The distributor has a new `-distributor.max-otlp-request-size` configuration for limiting OTLP requests. The default value is 100 MiB. + +Ingesters can be marked as read-only as part of their downscaling procedure. The new `prepare-instance-ring-downscale` endpoint updates the read-only +status of an ingester in the ring. + +## Important changes + +In Grafana Mimir 2.14, the following behavior has changed: + +When running a remote read request, the querier honors the time range specified in the read hints. + +The default inactivity timeout of active series in ingesters, controlled by the `-ingester.active-series-metrics-idle-timeout` configuration, +is increased from `10m` to `20m`. + +The following features of store-gateway are changed: `-blocks-storage.bucket-store.max-concurrent-queue-timeout` is set to five seconds; +`-blocks-storage.bucket-store.index-header.lazy-loading-concurrency-queue-timeout` is set to five seconds; +`-blocks-storage.bucket-store.max-concurrent` is set to 200; + +The experimental support for Redis caching is now deprecated and set to be removed in the next major release. Users are encouraged +to switch to Memcached. 
+ +The following deprecated configuration options were removed in this release: + +- The `-ingester.return-only-grpc-errors` option in the ingester +- The `-ingester.client.circuit-breaker.*` options in the ingester +- The `-ingester.limit-inflight-requests-using-grpc-method-limiter` option in the ingester +- The `-ingester.client.report-grpc-codes-in-instrumentation-label-enabled` option in the distributor and ruler +- The `-distributor.limit-inflight-requests-using-grpc-method-limiter` option in the distributor +- The `-distributor.enable-otlp-metadata-storage` option in the distributor +- The `-ruler.drain-notification-queue-on-shutdown` option in the ruler +- The `-querier.max-query-into-future` option in the querier +- The `-querier.prefer-streaming-chunks-from-store-gateways` option in the querier and the store-gateway +- The `-query-scheduler.use-multi-algorithm-query-queue` option in the querier-scheduler +- The YAML configuration `frontend.align_queries_with_step` in the query-frontend + +## Experimental features + +Grafana Mimir 2.14 includes some features that are experimental and disabled by default. +Use these features with caution and report any issues that you encounter: + +The ingester added an experimental `-ingester.ignore-ooo-exemplars` configuration. When set, out-of-order exemplars are no longer reported +to the remote write client. + +The querier supports the experimental `limitk()` and `limit_ratio()` PromQL functions. This feature is disabled by default, +but you can enable it with the `-querier.promql-experimental-functions-enabled=true` setting in the query-frontend and the querier. + +## Bug fixes + +- Alertmanager: fix configuration validation gap around unreferenced templates. +- Alertmanager: fix goroutine leak when stored configuration fails to apply and there is no existing tenant alertmanager. +- Alertmanager: fix receiver firewall to detect `0.0.0.0` and IPv6 interface-local multicast address as local addresses. 
+- Alertmanager: fix per-tenant silence limits not reloaded during runtime. +- Alertmanager: fix bugs in silences that could cause an existing silence to expire/be deleted when updating the silence fails. This could happen when the updated silence was invalid or exceeded limits. +- Alertmanager: fix help message for utf-8-strict-mode. +- Compactor: fix a race condition between different compactor replicas that may cause a deleted block to be referenced as non-deleted in the bucket index. +- Configuration: multi-line environment variables are flattened during injection to be compatible with YAML syntax. +- HA Tracker: store correct timestamp for the last-received request from the elected replica. +- Ingester: fix the sporadic `not found` error causing an internal server error if label names are queried with matchers during head compaction. +- Ingester, store-gateway: fix case insensitive regular expressions not correctly matching some Unicode characters. +- Ingester: fixed timestamp reported in the "the sample has been rejected because its timestamp is too old" error when the write request contains only histograms. +- Query-frontend: fix `-querier.max-query-lookback` and `-compactor.blocks-retention-period` enforcement in query-frontend when one of the two is not set. +- Query-frontend: "query stats" log includes the actual `status_code` when the request fails due to an error occurring in the query-frontend itself. +- Query-frontend: ensure that internal errors result in an HTTP 500 response code instead of a 422 response code. +- Query-frontend: return annotations generated during evaluation of sharded queries. +- Query-scheduler: fix a panic in request queueing. +- Querier: fix the issue where "context canceled" is logged for trace spans for requests to store-gateways that return no series when chunks streaming is enabled. +- Querier: fix issue where queries can return incorrect results if a single store-gateway returns overlapping chunks for a series. 
+- Querier: do not return `grpc: the client connection is closing` errors as HTTP `499`. +- Querier: fix issue where some native histogram-related warnings were not emitted when `rate()` was used over native histograms. +- Querier: fix invalid query results when multiple chunks are merged. +- Querier: support optional start and end times on `/prometheus/api/v1/labels`, `/prometheus/api/v1/label/