diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 3d56ff2b227..89b2f4d0b6f 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -11,6 +11,7 @@ updates:
       go.opentelemetry.io:
         patterns:
           - "go.opentelemetry.io/*"
+    open-pull-requests-limit: 20
   - package-ecosystem: "gomod"
     directory: "/documentation/examples/remote_storage"
     schedule:
@@ -19,6 +20,7 @@ updates:
     directory: "/web/ui"
     schedule:
       interval: "monthly"
+    open-pull-requests-limit: 20
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index 806a706e146..acf91ea12ab 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -12,8 +12,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
-      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index 0fbd01f533f..f52d20785fd 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -12,8 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
-      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 714faf167db..978218dba2d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -13,7 +13,7 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
       - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
@@ -27,7 +27,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
@@ -43,7 +43,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.21-base
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""
@@ -57,7 +57,7 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -74,8 +74,8 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
-      - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
        with:
          go-version: 1.22.x
      - run: |
@@ -91,7 +91,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -114,7 +114,7 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
@@ -137,32 +137,44 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
           thread: ${{ matrix.thread }}
+  check_generated_parser:
+    name: Check generated parser
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - name: Install Go
+        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
+        with:
+          cache: false
+          go-version: 1.22.x
+      - name: Run goyacc and check for diff
+        run: make install-goyacc check-generated-parser
   golangci:
     name: golangci-lint
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Install Go
-        uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+        uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
         with:
-          cache: false
           go-version: 1.22.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
+        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.56.2
+          version: v1.59.0
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -175,7 +187,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_main
         with:
@@ -189,7 +201,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_release
         with:
@@ -204,7 +216,7 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - name: Install nodejs
         uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 836fb256828..d85e9f8409c 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -24,15 +24,15 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
+        uses: github/codeql-action/init@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7
         with:
           languages: ${{ matrix.language }}
       - name: Autobuild
-        uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
+        uses: github/codeql-action/autobuild@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
+        uses: github/codeql-action/analyze@f079b8493333aace61c81488f8bd40919487bd9f # v3.25.7
diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml
index d0368eaa1cc..8ddbc34aebb 100644
--- a/.github/workflows/container_description.yml
+++ b/.github/workflows/container_description.yml
@@ -4,6 +4,7 @@ on:
   push:
     paths:
       - "README.md"
+      - "README-containers.md"
      - ".github/workflows/container_description.yml"
    branches: [ main, master ]

@@ -17,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub
@@ -29,7 +30,9 @@ jobs:
           destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
           provider: dockerhub
           short_description: ${{ env.DOCKER_REPO_NAME }}
-          readme_file: 'README.md'
+          # Empty string results in README-containers.md being pushed if it
+          # exists. Otherwise, README.md is pushed.
+          readme_file: ''

   PushQuayIoReadme:
     runs-on: ubuntu-latest
@@ -37,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name
@@ -49,4 +52,6 @@ jobs:
         with:
           destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
           provider: quay
-          readme_file: 'README.md'
+          # Empty string results in README-containers.md being pushed if it
+          # exists. Otherwise, README.md is pushed.
+          readme_file: ''
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index 4c19563eb43..dc510e5966c 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: failure() && steps.build.outcome == 'success'
        with:
          name: artifacts
diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml
index 3458d7b1190..537e9abd84d 100644
--- a/.github/workflows/repo_sync.yml
+++ b/.github/workflows/repo_sync.yml
@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 51ff643ab20..bb799ddd94d 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -21,12 +21,12 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # tag=v4.1.2
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
         with:
           persist-credentials: false
      - name: "Run analysis"
-        uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1
+        uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
        with:
          results_file: results.sarif
          results_format: sarif
@@ -37,7 +37,7 @@ jobs:
      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # tag=v4.3.1
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
         with:
           name: SARIF file
           path: results.sarif
@@ -45,6 +45,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12
+        uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # tag=v3.25.7
         with:
           sarif_file: results.sarif
diff --git a/.gitpod.Dockerfile b/.gitpod.Dockerfile
index d645db5de15..2370ec5f5c2 100644
--- a/.gitpod.Dockerfile
+++ b/.gitpod.Dockerfile
@@ -1,15 +1,33 @@
 FROM gitpod/workspace-full

+# Set Node.js version as an environment variable.
 ENV CUSTOM_NODE_VERSION=16
-ENV CUSTOM_GO_VERSION=1.19
-ENV GOPATH=$HOME/go-packages
-ENV GOROOT=$HOME/go
-ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH

+# Install and use the specified Node.js version via nvm.
 RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"

+# Ensure nvm uses the default Node.js version in all new shells.
 RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix

-RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
-    && printf '%s\n' 'export GOPATH=/workspace/go' \
-                     'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
+# Remove any existing Go installation in the $HOME path.
+RUN rm -rf $HOME/go $HOME/go-packages
+
+# Export Go environment variables.
+RUN echo "export GOPATH=/workspace/go" >> ~/.bashrc.d/300-go && \
+    echo "export GOBIN=\$GOPATH/bin" >> ~/.bashrc.d/300-go && \
+    echo "export GOROOT=${HOME}/go" >> ~/.bashrc.d/300-go && \
+    echo "export PATH=\$GOROOT/bin:\$GOBIN:\$PATH" >> ~/.bashrc
+
+# Reload the environment variables to ensure the Go environment variables are
+# available in subsequent commands.
+RUN bash -c "source ~/.bashrc && source ~/.bashrc.d/300-go"
+
+# Fetch the Go version dynamically from the Prometheus go.mod file and install Go in the $HOME path.
+RUN export CUSTOM_GO_VERSION=$(curl -sSL "https://raw.githubusercontent.com/prometheus/prometheus/main/go.mod" | awk '/^go/{print $2".0"}') && \
+    curl -fsSL "https://dl.google.com/go/go${CUSTOM_GO_VERSION}.linux-amd64.tar.gz" | \
+    tar -xz -C $HOME
+
+# Fetch the goyacc parser version dynamically from the Prometheus Makefile
+# and install it globally in the $GOBIN path.
+RUN GOYACC_VERSION=$(curl -fsSL "https://raw.githubusercontent.com/prometheus/prometheus/main/Makefile" | awk -F'=' '/GOYACC_VERSION \?=/{gsub(/ /, "", $2); print $2}') && \
+    go install "golang.org/x/tools/cmd/goyacc@${GOYACC_VERSION}"
diff --git a/.golangci.yml b/.golangci.yml
index a85a76cdf11..f81b29ed2d9 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -21,6 +21,7 @@ linters:
     - goimports
     - misspell
     - nolintlint
+    - perfsprint
     - predeclared
     - revive
     - testifylint
@@ -44,7 +45,9 @@ issues:
     - linters:
         - godot
       source: "^// ==="
-
+    - linters:
+        - perfsprint
+      text: "fmt.Sprintf can be replaced with string concatenation"
 linters-settings:
   depguard:
     rules:
@@ -85,6 +88,9 @@ linters-settings:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  perfsprint:
+    # Optimizes `fmt.Errorf`.
+    errorf: false
  revive:
    # By default, revive will enable only the linting rules that are named in the configuration file.
    # So, it's needed to explicitly set in configuration all required rules.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 97554138a1c..d5a91e90099 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,36 @@

 ## unreleased

+## 2.53.1 / 2024-07-10
+
+Fix a bug which would drop samples in remote-write if the sending flow stalled
+for longer than it takes to write one "WAL segment". How long this takes depends on the size
+of your Prometheus; as a rough guide, with 10 million series it is about 2-3 minutes.
+
+* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
+
+## 2.53.0 / 2024-06-16
+
+This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory, by adjusting the upstream Go default from 100 to 75.
+
+* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048
+* [CHANGE] Runtime: Change GOGC threshold from 100 to 75 #14176 #14285
+* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273
+* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974
+* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
+* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
+* [BUGFIX] OTLP: Don't generate target_info unless there are metrics and at least one identifying label is defined. #13991
+* [BUGFIX] Scrape: Do not try to ingest native histograms when the native histograms feature is turned off. This happened when protobuf scrape was enabled, for example by the created time feature. #13987
+* [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941
+* [BUGFIX] Query logger: Do not leak file descriptors on error. #13948
+* [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199
+* [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116
+* [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. #14079
+
+## 2.52.1 / 2024-05-29
+
+* [BUGFIX] Linode SD: Fix partial fetch when discovery would return more than 500 elements. #14141
+
 ## 2.52.0 / 2024-05-07

 * [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7687826ba45..9b1b286ccf4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -42,7 +42,12 @@ go build ./cmd/prometheus/
 make test # Make sure all the tests pass before you commit and push :)
 ```

-We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
+To run a collection of Go linters through [`golangci-lint`](https://github.com/golangci/golangci-lint), do:
+```bash
+make lint
+```
+
+If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though; fixing the code to comply with the linter's recommendation is in general the preferred course of action. See [this section of the golangci-lint documentation](https://golangci-lint.run/usage/false-positives/#nolint-directive) for more information.

 All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions).
diff --git a/Makefile b/Makefile
index 61e8f4377cd..5dcebfd1af9 100644
--- a/Makefile
+++ b/Makefile
@@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
 TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout

 GOLANGCI_LINT_OPTS ?= --timeout 4m
+GOYACC_VERSION ?= v0.6.0

 include Makefile.common

@@ -78,24 +79,42 @@ assets-tarball: assets
	@echo '>> packaging assets'
	scripts/package_assets.sh

-# We only want to generate the parser when there's changes to the grammar.
 .PHONY: parser
 parser:
	@echo ">> running goyacc to generate the .go file."
 ifeq (, $(shell command -v goyacc 2> /dev/null))
	@echo "goyacc not installed so skipping"
-	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
+	@echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\""
 else
-	goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+	$(MAKE) promql/parser/generated_parser.y.go
 endif

+promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
+	@echo ">> running goyacc to generate the .go file."
+	@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+
+.PHONY: clean-parser
+clean-parser:
+	@echo ">> cleaning generated parser"
+	@rm -f promql/parser/generated_parser.y.go
+
+.PHONY: check-generated-parser
+check-generated-parser: clean-parser promql/parser/generated_parser.y.go
+	@echo ">> checking generated parser"
+	@git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false)
+
+.PHONY: install-goyacc
+install-goyacc:
+	@echo ">> installing goyacc $(GOYACC_VERSION)"
+	@go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)
+
 .PHONY: test
 # If we only want to only test go code we have to change the test target
 # which is called by all.
 ifeq ($(GO_ONLY),1)
 test: common-test check-go-mod-version
 else
-test: common-test ui-build-module ui-test ui-lint check-go-mod-version
+test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
 endif

 .PHONY: npm_licenses
diff --git a/Makefile.common b/Makefile.common
index 3474b36423b..f71e80bcb3e 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.56.2
+GOLANGCI_LINT_VERSION ?= v1.59.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
diff --git a/RELEASE.md b/RELEASE.md
index f313c4172d6..f9a42be6b88 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -149,6 +149,8 @@ Changes for a patch release or release candidate should be merged into the previ
 Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.).

+When updating the `CHANGELOG.md`, look at all PRs included in the release since the last release and verify whether they need a changelog entry.
+
 Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history.

 For release candidates still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update.
diff --git a/VERSION b/VERSION
index cfa53dc0160..f419e2c6f15 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.52.0
+2.53.1
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 0e15d5ca5fd..8db2f2c5ebc 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -28,6 +28,8 @@ import (
	"os/signal"
	"path/filepath"
	"runtime"
+	"runtime/debug"
+	"strconv"
	"strings"
	"sync"
	"syscall"
@@ -42,6 +44,7 @@ import (
	"github.com/mwitkow/go-conntrack"
	"github.com/oklog/run"
	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
	versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promlog"
@@ -217,6 +220,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
			level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
		case "native-histograms":
			c.tsdb.EnableNativeHistograms = true
+			c.scrape.EnableNativeHistogramsIngestion = true
			// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
			config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
			config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -251,6 +255,18 @@ func main() {
		newFlagRetentionDuration model.Duration
	)

+	// Unregister the default GoCollector, and reregister with our defaults.
+	if prometheus.Unregister(collectors.NewGoCollector()) {
+		prometheus.MustRegister(
+			collectors.NewGoCollector(
+				collectors.WithGoCollectorRuntimeMetrics(
+					collectors.MetricsGC,
+					collectors.MetricsScheduler,
+				),
+			),
+		)
+	}
+
	cfg := flagConfig{
		notifier: notifier.Options{
			Registerer: prometheus.DefaultRegisterer,
@@ -417,7 +433,7 @@ func main() {
	serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
		Default("1m").SetValue(&cfg.resendDelay)

-	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently.").
+	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
		Default("4").Int64Var(&cfg.maxConcurrentEvals)

	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@@ -771,6 +787,9 @@ func main() {
			ResendDelay:            time.Duration(cfg.resendDelay),
			MaxConcurrentEvals:     cfg.maxConcurrentEvals,
			ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
+			DefaultRuleQueryOffset: func() time.Duration {
+				return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
+			},
		})
	}
@@ -1367,6 +1386,17 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
		return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
	}

+	oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
+	if oldGoGC != conf.Runtime.GoGC {
+		level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
+	}
+	// Write the new setting out to the ENV var for runtime API output.
+	if conf.Runtime.GoGC >= 0 {
+		os.Setenv("GOGC", strconv.Itoa(conf.Runtime.GoGC))
+	} else {
+		os.Setenv("GOGC", "off")
+	}
+
	noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
	l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
	level.Info(logger).Log(append(l, timings...)...)
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 03f3a9bc392..89c171bb5b7 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -24,6 +24,7 @@ import (
	"os/exec"
	"path/filepath"
	"runtime"
+	"strconv"
	"strings"
	"syscall"
	"testing"
@@ -189,7 +190,7 @@ func TestSendAlerts(t *testing.T) {

	for i, tc := range testCases {
		tc := tc
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
			senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
				require.NotEmpty(t, tc.in, "sender called with 0 alert")
				require.Equal(t, tc.exp, alerts)
diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
index dd6b56672c0..9a7a3ed855e 100644
--- a/cmd/prometheus/query_log_test.go
+++ b/cmd/prometheus/query_log_test.go
@@ -296,7 +296,7 @@ func (p *queryLogTest) run(t *testing.T) {
	if p.exactQueryCount() {
		require.Equal(t, 1, qc)
	} else {
-		require.Greater(t, qc, 0, "no queries logged")
+		require.Positive(t, qc, "no queries logged")
	}
	p.validateLastQuery(t, ql)
@@ -366,7 +366,7 @@ func (p *queryLogTest) run(t *testing.T) {
	if p.exactQueryCount() {
		require.Equal(t, 1, qc)
	} else {
-		require.Greater(t, qc, 0, "no queries logged")
+		require.Positive(t, qc, "no queries logged")
	}
 }
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index 601c3ced9f2..79db428c717 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -88,7 +88,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
	blockDuration := getCompatibleBlockDuration(maxBlockDuration)
	mint = blockDuration * (mint / blockDuration)

-	db, err := tsdb.OpenDBReadOnly(outputDir, nil)
+	db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
	if err != nil {
		return err
	}
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index a62ae4fbf45..e1d275e97ed 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -56,8 +56,8 @@ import (
	"github.com/prometheus/prometheus/model/rulefmt"
	"github.com/prometheus/prometheus/notifier"
	_ "github.com/prometheus/prometheus/plugins" // Register plugins.
-	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/promqltest"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/util/documentcli"
 )
@@ -235,12 +235,14 @@ func main() {
	tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
	dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+	dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
	dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
	dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
	dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

-	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.")
+	tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
	dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+	dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
	dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
	dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
	dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
@@ -377,7 +379,7 @@ func main() {
	case testRulesCmd.FullCommand():
		os.Exit(RulesUnitTest(
-			promql.LazyLoaderOpts{
+			promqltest.LazyLoaderOpts{
				EnableAtModifier:     true,
				EnableNegativeOffset: true,
			},
@@ -396,9 +398,9 @@ func main() {
		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))

	case tsdbDumpCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
	case tsdbDumpOpenMetricsCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
	// TODO(aSquare14): Work on adding support for custom block size.
	case openMetricsImportCmd.FullCommand():
		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index 7306a3e6484..78500fe937f 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -25,6 +25,7 @@ import (
	"os/exec"
	"path/filepath"
	"runtime"
+	"strconv"
	"strings"
	"syscall"
	"testing"
@@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
	} {
		t.Run(c.file, func(t *testing.T) {
			for _, lintFatal := range []bool{true, false} {
-				t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
+				t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
					args := []string{"-test.main", "check", "config", "testdata/" + c.file}
					if lintFatal {
						args = append(args, "--lint-fatal")
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 2ad969438c3..2ed7244b1c6 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
 }

 func listBlocks(path string, humanReadable bool) error {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+	db, err := tsdb.OpenDBReadOnly(path, "", nil)
	if err != nil {
		return err
	}
@@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
 }

 func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+	db, err := tsdb.OpenDBReadOnly(path, "", nil)
	if err != nil {
		return nil, nil, err
	}
@@ -708,8 +708,8 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.

 type SeriesSetFormatter func(series storage.SeriesSet) error

-func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
-	db, err := tsdb.OpenDBReadOnly(path, nil)
+func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+	db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
	if err != nil {
		return err
	}
@@ -856,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
	}
	avg := sum / len(datas)
	fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
-	maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
-	maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
-	maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+	maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
+	maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
+	maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
	for bucket, count := range buckets {
		percentage := 100.0 * count / total
		fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))
diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go
index 36a65d73e4b..75089b168bd 100644
--- a/cmd/promtool/tsdb_test.go
+++ b/cmd/promtool/tsdb_test.go
@@ -26,7 +26,7 @@ import (

	"github.com/stretchr/testify/require"

-	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/promqltest"
	"github.com/prometheus/prometheus/tsdb"
 )
@@ -64,6 +64,7 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin
	err := dumpSamples(
		context.Background(),
		path,
+		t.TempDir(),
		mint,
		maxt,
		match,
@@ -88,7 +89,7 @@ func normalizeNewLine(b []byte) []byte {
 }

 func TestTSDBDump(t *testing.T) {
-	storage := promql.LoadedStorage(t, `
+	storage := promqltest.LoadedStorage(t, `
		load 1m
			metric{foo="bar", baz="abc"} 1 2 3 4 5
			heavy_metric{foo="bar"} 5 4 3 2 1
@@ -158,7 +159,7 @@ func TestTSDBDump(t *testing.T) {
 }

 func TestTSDBDumpOpenMetrics(t *testing.T) {
-	storage := promql.LoadedStorage(t, `
+	storage := promqltest.LoadedStorage(t, `
		load 1m
			my_counter{foo="bar", baz="abc"} 1 2 3 4 5
			my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 6d6683a9346..5451c5296cc 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -36,13 +36,14 @@ import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/promqltest"
	"github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/storage"
 )

 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
	failed := false

	var run *regexp.Regexp
@@ -69,7 +70,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif
	return successExitCode
 }

-func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
	fmt.Println("Unit Testing: ", filename)

	b, err := os.ReadFile(filename)
@@ -175,9 +176,9 @@ type testGroup struct {
 }

 // test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
	// Setup testing suite.
-	suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
+	suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
	if err != nil {
		return []error{err}
	}
@@ -413,7 +414,7 @@ Outer:
				gotSamples = append(gotSamples, parsedSample{
					Labels:    s.Metric.Copy(),
					Value:     s.F,
-					Histogram: promql.HistogramTestExpression(s.H),
+					Histogram: promqltest.HistogramTestExpression(s.H),
				})
			}
@@ -443,7 +444,7 @@ Outer:
				expSamples = append(expSamples, parsedSample{
					Labels:    lb,
					Value:     s.Value,
-					Histogram: promql.HistogramTestExpression(hist),
+					Histogram: promqltest.HistogramTestExpression(hist),
				})
			}
@@ -572,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
	}
	s := "[\n0:" + indentLines("\n"+la[0].String(), "  ")
	for i, l := range la[1:] {
-		s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), "  ")
+		s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), "  ")
	}
	s += "\n]"
diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go
index 971ddb40c5b..2dbd5a4e512 100644
--- a/cmd/promtool/unittest_test.go
+++ b/cmd/promtool/unittest_test.go
@@ -18,7 +18,7 @@ import (

	"github.com/stretchr/testify/require"

-	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/promqltest"
 )

 func TestRulesUnitTest(t *testing.T) {
@@ -28,7 +28,7 @@ func TestRulesUnitTest(t *testing.T) {
	tests := []struct {
		name      string
		args      args
-		queryOpts promql.LazyLoaderOpts
+		queryOpts promqltest.LazyLoaderOpts
		want      int
	}{
		{
@@ -92,7 +92,7 @@ func TestRulesUnitTest(t *testing.T) {
			args: args{
				files: []string{"./testdata/at-modifier-test.yml"},
			},
-			queryOpts: promql.LazyLoaderOpts{
+			queryOpts: promqltest.LazyLoaderOpts{
				EnableAtModifier: true,
			},
			want: 0,
@@ -109,7 +109,7 @@ func TestRulesUnitTest(t *testing.T) {
			args: args{
				files: []string{"./testdata/negative-offset-test.yml"},
			},
-			queryOpts: promql.LazyLoaderOpts{
+			queryOpts: promqltest.LazyLoaderOpts{
				EnableNegativeOffset: true,
			},
			want: 0,
@@ -119,7 +119,7 @@ func TestRulesUnitTest(t *testing.T) {
			args: args{
				files: []string{"./testdata/no-test-group-interval.yml"},
			},
-			queryOpts: promql.LazyLoaderOpts{
+			queryOpts: promqltest.LazyLoaderOpts{
				EnableNegativeOffset: true,
			},
			want: 0,
@@ -142,7 +142,7 @@ func TestRulesUnitTestRun(t *testing.T) {
	tests := []struct {
		name      string
		args      args
-		queryOpts promql.LazyLoaderOpts
+		queryOpts promqltest.LazyLoaderOpts
		want      int
	}{
		{
diff --git a/config/config.go b/config/config.go
index 1cfd588643d..9defa10d485 100644
--- a/config/config.go
+++ b/config/config.go
@@ -20,6 +20,7 @@ import (
	"os"
	"path/filepath"
	"sort"
+	"strconv"
	"strings"
	"time"
@@ -145,11 +146,17 @@ var (
		ScrapeInterval:     model.Duration(1 * time.Minute),
		ScrapeTimeout:      model.Duration(10 * time.Second),
		EvaluationInterval: model.Duration(1 * time.Minute),
+		RuleQueryOffset:    model.Duration(0 * time.Minute),
		// When native histogram feature flag is enabled, ScrapeProtocols default
		// changes to DefaultNativeHistogramScrapeProtocols.
		ScrapeProtocols: DefaultScrapeProtocols,
	}

+	DefaultRuntimeConfig = RuntimeConfig{
+		// Go runtime tuning.
+		GoGC: 75,
+	}
+
	// DefaultScrapeConfig is the default scrape configuration.
	DefaultScrapeConfig = ScrapeConfig{
		// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
@@ -224,6 +231,7 @@ var (

 // Config is the top-level configuration for Prometheus's config files.
 type Config struct {
	GlobalConfig      GlobalConfig    `yaml:"global"`
+	Runtime           RuntimeConfig   `yaml:"runtime,omitempty"`
	AlertingConfig    AlertingConfig  `yaml:"alerting,omitempty"`
	RuleFiles         []string        `yaml:"rule_files,omitempty"`
	ScrapeConfigFiles []string        `yaml:"scrape_config_files,omitempty"`
@@ -334,6 +342,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
		c.GlobalConfig = DefaultGlobalConfig
	}

+	// If a runtime block was open but empty, the default runtime config is overwritten.
+	// We have to restore it here.
+	if c.Runtime.isZero() {
+		c.Runtime = DefaultRuntimeConfig
+		// Use the GOGC env var value if the runtime section is empty.
+		c.Runtime.GoGC = getGoGCEnv()
+	}
+
	for _, rf := range c.RuleFiles {
		if !patRulePath.MatchString(rf) {
			return fmt.Errorf("invalid rule file path %q", rf)
@@ -397,6 +413,8 @@ type GlobalConfig struct {
	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
	// How frequently to evaluate rules by default.
	EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
+	// Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
+	RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
	// File to which PromQL queries are logged.
	QueryLogFile string `yaml:"query_log_file,omitempty"`
	// The labels to add to any timeseries that this Prometheus instance scrapes.
@@ -556,10 +574,22 @@ func (c *GlobalConfig) isZero() bool {
		c.ScrapeInterval == 0 &&
		c.ScrapeTimeout == 0 &&
		c.EvaluationInterval == 0 &&
+		c.RuleQueryOffset == 0 &&
		c.QueryLogFile == "" &&
		c.ScrapeProtocols == nil
 }

+// RuntimeConfig configures the values for the process behavior.
+type RuntimeConfig struct {
+	// The Go garbage collection target percentage.
+	GoGC int `yaml:"gogc,omitempty"`
+}
+
+// isZero returns true iff the runtime config is the zero value.
+func (c *RuntimeConfig) isZero() bool {
+	return c.GoGC == 0
+}
+
 type ScrapeConfigs struct {
	ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
 }
@@ -1207,3 +1237,19 @@ func filePath(filename string) string {
 func fileErr(filename string, err error) error {
	return fmt.Errorf("%q: %w", filePath(filename), err)
 }
+
+func getGoGCEnv() int {
+	goGCEnv := os.Getenv("GOGC")
+	// If the GOGC env var is set, use the same logic as upstream Go.
+	if goGCEnv != "" {
+		// Special case for GOGC=off.
+		if strings.ToLower(goGCEnv) == "off" {
+			return -1
+		}
+		i, err := strconv.Atoi(goGCEnv)
+		if err == nil {
+			return i
+		}
+	}
+	return DefaultRuntimeConfig.GoGC
+}
diff --git a/config/config_default_test.go b/config/config_default_test.go
index 26623590d96..31133f1e04b 100644
--- a/config/config_default_test.go
+++ b/config/config_default_test.go
@@ -19,6 +19,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"

 var ruleFilesExpectedConf = &Config{
	GlobalConfig: DefaultGlobalConfig,
+	Runtime:      DefaultRuntimeConfig,
	RuleFiles: []string{
		"testdata/first.rules",
		"testdata/rules/second.rules",
diff --git a/config/config_test.go b/config/config_test.go
index 14981d25f0c..ff056a2676c 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -76,6 +76,7 @@ const (
	globLabelLimit            = 30
	globLabelNameLengthLimit  = 200
	globLabelValueLengthLimit = 200
+	globalGoGC                = 42
 )

 var expectedConf = &Config{
@@ -96,6 +97,10 @@ var expectedConf = &Config{
		ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
	},

+	Runtime: RuntimeConfig{
+		GoGC: globalGoGC,
+	},
+
	RuleFiles: []string{
		filepath.FromSlash("testdata/first.rules"),
		filepath.FromSlash("testdata/my/*.rules"),
@@ -2081,6 +2086,7 @@ func TestEmptyGlobalBlock(t *testing.T) {
	c, err := Load("global:\n", false, log.NewNopLogger())
	require.NoError(t, err)
	exp := DefaultConfig
+	exp.Runtime = DefaultRuntimeConfig
	require.Equal(t, exp, *c)
 }
diff --git a/config/config_windows_test.go b/config/config_windows_test.go
index 7fd1d46f6d1..db4d46ef13a 100644
--- a/config/config_windows_test.go
+++ b/config/config_windows_test.go
@@ -17,6 +17,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml"

 var ruleFilesExpectedConf = &Config{
	GlobalConfig: DefaultGlobalConfig,
+	Runtime:      DefaultRuntimeConfig,
	RuleFiles: []string{
		"testdata\\first.rules",
		"testdata\\rules\\second.rules",
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index b584301649e..184e6363ce5 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -14,6 +14,9 @@ global:
     monitor: codelab
     foo: bar

+runtime:
+  gogc: 42
+
 rule_files:
   - "first.rules"
   - "my/*.rules"
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index aa79fd9c62e..a6a0a825776 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -18,6 +18,7 @@ import (
	"errors"
	"fmt"
	"net"
+	"strconv"
	"strings"
	"time"
@@ -279,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
		if inst.PrivateDnsName != nil {
			labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
		}
-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
		labels[model.AddressLabel] = model.LabelValue(addr)

		if inst.Platform != nil {
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index 86b138be555..0ad7f2d5416 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -18,6 +18,7 @@ import (
	"errors"
	"fmt"
	"net"
+	"strconv"
	"strings"
	"time"
@@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
			lightsailLabelRegion: model.LabelValue(d.cfg.Region),
		}

-		addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
+		addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
		labels[model.AddressLabel] = model.LabelValue(addr)

		if inst.PublicIpAddress != nil {
diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go
index 7c2ece2c7bd..70d95b9f3a0 100644
--- a/discovery/azure/azure.go
+++ b/discovery/azure/azure.go
@@ -20,6 +20,7 @@ import (
	"math/rand"
	"net"
	"net/http"
+	"strconv"
	"strings"
	"sync"
	"time"
@@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
		}
		if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
			labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-			address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+			address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
			labels[model.AddressLabel] = model.LabelValue(address)
			return labels, nil
		}
diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go
index 40eed7697a5..bdc1fc8dce4 100644
--- a/discovery/consul/consul.go
+++ b/discovery/consul/consul.go
@@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
		// since the service may be registered remotely through a different node.
		var addr string
		if serviceNode.Service.Address != "" {
-			addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+			addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
		} else {
-			addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+			addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
		}

		labels := model.LabelSet{
diff --git a/discovery/digitalocean/digitalocean.go b/discovery/digitalocean/digitalocean.go
index 18380b7296f..ecee60cb1f0 100644
--- a/discovery/digitalocean/digitalocean.go
+++ b/discovery/digitalocean/digitalocean.go
@@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
		}

		labels := model.LabelSet{
-			doLabelID:        model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
+			doLabelID:        model.LabelValue(strconv.Itoa(droplet.ID)),
			doLabelName:      model.LabelValue(droplet.Name),
			doLabelImage:     model.LabelValue(droplet.Image.Slug),
			doLabelImageName: model.LabelValue(droplet.Image.Name),
diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go
index cf56a2ad025..314c3d38cd5 100644
--- a/discovery/dns/dns.go
+++ b/discovery/dns/dns.go
@@ -18,6 +18,7 @@ import (
	"errors"
	"fmt"
	"net"
+	"strconv"
	"strings"
	"sync"
	"time"
@@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
	tg := &targetgroup.Group{}
	hostPort := func(a string, p int) model.LabelValue {
-		return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
+		return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
	}

	for _, record := range response.Answer {
@@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
		switch addr := record.(type) {
		case *dns.SRV:
			dnsSrvRecordTarget = model.LabelValue(addr.Target)
-			dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
+			dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))

			// Remove the final dot from rooted DNS names to make them look more usual.
			addr.Target = strings.TrimRight(addr.Target, ".")
diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go
index 6d0599dfa24..df56f94c5fb 100644
--- a/discovery/hetzner/hcloud.go
+++ b/discovery/hetzner/hcloud.go
@@ -15,7 +15,6 @@ package hetzner

 import (
	"context"
-	"fmt"
	"net"
	"net/http"
	"strconv"
@@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
	for i, server := range servers {
		labels := model.LabelSet{
			hetznerLabelRole:       model.LabelValue(HetznerRoleHcloud),
-			hetznerLabelServerID:   model.LabelValue(fmt.Sprintf("%d", server.ID)),
+			hetznerLabelServerID:   model.LabelValue(strconv.FormatInt(server.ID, 10)),
			hetznerLabelServerName: model.LabelValue(server.Name),
			hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
			hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
			hetznerLabelHcloudDatacenterLocation:            model.LabelValue(server.Datacenter.Location.Name),
			hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
			hetznerLabelHcloudType:                          model.LabelValue(server.ServerType.Name),
-			hetznerLabelHcloudCPUCores:                      model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
+			hetznerLabelHcloudCPUCores:                      model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
			hetznerLabelHcloudCPUType:                       model.LabelValue(server.ServerType.CPUType),
-			hetznerLabelHcloudMemoryGB:                      model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
-			hetznerLabelHcloudDiskGB:                        model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
+			hetznerLabelHcloudMemoryGB:                      model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
+			hetznerLabelHcloudDiskGB:                        model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
			model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
		}
diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go
index b862c33f5b2..516470b05ad 100644
--- a/discovery/hetzner/robot.go
+++ b/discovery/hetzner/robot.go
@@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
			hetznerLabelPublicIPv4:     model.LabelValue(server.Server.ServerIP),
			hetznerLabelServerStatus:   model.LabelValue(server.Server.Status),
			hetznerLabelRobotProduct:   model.LabelValue(server.Server.Product),
-			hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
+			hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
			model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
		}
diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go
index 1ed699645d1..a455a8e3413 100644
--- a/discovery/legacymanager/manager_test.go
+++ b/discovery/legacymanager/manager_test.go
@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
	var cfg discovery.StaticConfig
	for i, addr := range addrs {
		cfg = append(cfg, &targetgroup.Group{
-			Source: fmt.Sprint(i),
+			Source: strconv.Itoa(i),
			Targets: []model.LabelSet{
				{model.AddressLabel: model.LabelValue(addr)},
			},
diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go
index 2a5475b8542..634a6b1d4bb 100644
--- a/discovery/linode/linode.go
+++ b/discovery/linode/linode.go
@@ -186,12 +186,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	if d.lastResults != nil && d.eventPollingEnabled {
		// Check to see if there have been any events. If so, refresh our data.
-		opts := linodego.ListOptions{
+		eventsOpts := linodego.ListOptions{
			PageOptions: &linodego.PageOptions{Page: 1},
			PageSize:    25,
			Filter:      fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")),
		}
-		events, err := d.client.ListEvents(ctx, &opts)
+		events, err := d.client.ListEvents(ctx, &eventsOpts)
		if err != nil {
			var e *linodego.Error
			if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
@@ -232,31 +232,40 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
	tg := &targetgroup.Group{
		Source: "Linode",
	}
-	opts := linodego.ListOptions{
+	// We need 3 of these because Linodego writes into the structure during pagination
+	listInstancesOpts := linodego.ListOptions{
+		PageSize: 500,
+	}
+	listIPAddressesOpts := linodego.ListOptions{
+		PageSize: 500,
+	}
+	listIPv6RangesOpts := linodego.ListOptions{
		PageSize: 500,
	}

	// If region filter provided, use it to constrain results.
	if d.region != "" {
-		opts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
+		listInstancesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
+		listIPAddressesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
+		listIPv6RangesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
	}

	// Gather all linode instances.
-	instances, err := d.client.ListInstances(ctx, &opts)
+	instances, err := d.client.ListInstances(ctx, &listInstancesOpts)
	if err != nil {
		d.metrics.failuresCount.Inc()
		return nil, err
	}

	// Gather detailed IP address info for all IPs on all linode instances.
-	detailedIPs, err := d.client.ListIPAddresses(ctx, &opts)
+	detailedIPs, err := d.client.ListIPAddresses(ctx, &listIPAddressesOpts)
	if err != nil {
		d.metrics.failuresCount.Inc()
		return nil, err
	}

	// Gather detailed IPv6 Range info for all linode instances.
- ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &opts) + ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &listIPv6RangesOpts) if err != nil { d.metrics.failuresCount.Inc() return nil, err @@ -325,7 +334,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro } labels := model.LabelSet{ - linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)), + linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)), linodeLabelName: model.LabelValue(instance.Label), linodeLabelImage: model.LabelValue(instance.Image), linodeLabelPrivateIPv4: model.LabelValue(privateIPv4), @@ -338,13 +347,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro linodeLabelType: model.LabelValue(instance.Type), linodeLabelStatus: model.LabelValue(instance.Status), linodeLabelGroup: model.LabelValue(instance.Group), - linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)), + linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)), linodeLabelHypervisor: model.LabelValue(instance.Hypervisor), linodeLabelBackups: model.LabelValue(backupsStatus), - linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)), - linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)), - linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)), - linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)), + linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)), + linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)), + linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)), + linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)), } addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10)) diff --git a/discovery/manager_test.go b/discovery/manager_test.go index 656d7c3c668..be07edbdb40 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig { var cfg StaticConfig for i, addr := range addrs { cfg = append(cfg, &targetgroup.Group{ - Source: fmt.Sprint(i), + Source: strconv.Itoa(i), Targets: []model.LabelSet{ {model.AddressLabel: model.LabelValue(addr)}, }, diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go index 3e9e15967fd..38b47accffb 100644 --- a/discovery/marathon/marathon.go +++ b/discovery/marathon/marathon.go @@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string { host = task.Host } - return net.JoinHostPort(host, fmt.Sprintf("%d", port)) + return net.JoinHostPort(host, strconv.Itoa(int(port))) } // Get a list of ports and a list of labels from a PortMapping. diff --git a/discovery/metrics.go b/discovery/metrics.go index e738331a180..356be1ddcbe 100644 --- a/discovery/metrics.go +++ b/discovery/metrics.go @@ -19,16 +19,6 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -var ( - clientGoRequestMetrics = &clientGoRequestMetricAdapter{} - clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{} -) - -func init() { - clientGoRequestMetrics.RegisterWithK8sGoClient() - clientGoWorkloadMetrics.RegisterWithK8sGoClient() -} - // Metrics to be used with a discovery manager. 
type Metrics struct { FailedConfigs prometheus.Gauge diff --git a/discovery/metrics_k8s_client.go b/discovery/metrics_k8s_client.go index f16245684bb..c13ce533178 100644 --- a/discovery/metrics_k8s_client.go +++ b/discovery/metrics_k8s_client.go @@ -35,6 +35,11 @@ const ( workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue" ) +var ( + clientGoRequestMetrics = &clientGoRequestMetricAdapter{} + clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{} +) + var ( // Metrics for client-go's HTTP requests. clientGoRequestResultMetricVec = prometheus.NewCounterVec( @@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector { } func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error { + clientGoRequestMetrics.RegisterWithK8sGoClient() + clientGoWorkloadMetrics.RegisterWithK8sGoClient() + for _, collector := range clientGoMetrics() { err := registerer.Register(collector) if err != nil { diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 0e0d0041de8..794d2e607d0 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -15,7 +15,7 @@ package moby import ( "context" - "fmt" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/client" @@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s labelPrefix + labelNetworkID: network.ID, labelPrefix + labelNetworkName: network.Name, labelPrefix + labelNetworkScope: network.Scope, - labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal), - labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress), + labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal), + labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress), } for k, v := range network.Labels { ln := strutil.SanitizeLabelName(k) diff --git a/discovery/moby/nodes.go b/discovery/moby/nodes.go index a7c5551c021..b5be844eda6 100644 --- a/discovery/moby/nodes.go +++ b/discovery/moby/nodes.go @@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err swarmLabelNodeAddress: model.LabelValue(n.Status.Addr), } if n.ManagerStatus != nil { - labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader)) + labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader)) labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability) labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr) } diff --git a/discovery/moby/services.go b/discovery/moby/services.go index 1d472b5c006..c61b499259c 100644 --- a/discovery/moby/services.go +++ b/discovery/moby/services.go @@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group, labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port)) + addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) diff --git a/discovery/moby/tasks.go b/discovery/moby/tasks.go index 2505a7b07a9..38b9d33de26 100644 --- a/discovery/moby/tasks.go +++ b/discovery/moby/tasks.go @@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err labels[model.LabelName(k)] = model.LabelValue(v) } - addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port)) + addr := net.JoinHostPort(ip.String(), 
strconv.Itoa(d.port)) labels[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, labels) diff --git a/discovery/openstack/hypervisor.go b/discovery/openstack/hypervisor.go index 16964cfb620..8964da9294f 100644 --- a/discovery/openstack/hypervisor.go +++ b/discovery/openstack/hypervisor.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net" + "strconv" "github.com/go-kit/log" "github.com/gophercloud/gophercloud" @@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group } tg := &targetgroup.Group{ - Source: fmt.Sprintf("OS_" + h.region), + Source: "OS_" + h.region, } // OpenStack API reference // https://developer.openstack.org/api-ref/compute/#list-hypervisors-details @@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group } for _, hypervisor := range hypervisorList { labels := model.LabelSet{} - addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port)) + addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port)) labels[model.AddressLabel] = model.LabelValue(addr) labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID) labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname) diff --git a/discovery/openstack/instance.go b/discovery/openstack/instance.go index 9b28c1d6e1c..750d414a2b3 100644 --- a/discovery/openstack/instance.go +++ b/discovery/openstack/instance.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "net" + "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, } pager := servers.List(client, opts) tg := &targetgroup.Group{ - Source: fmt.Sprintf("OS_" + i.region), + Source: "OS_" + i.region, } err = pager.EachPage(func(page pagination.Page) (bool, error) { if ctx.Err() != nil { @@ -194,7 +195,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok { lbls[openstackLabelPublicIP] = model.LabelValue(val) } - addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port)) + addr = net.JoinHostPort(addr, strconv.Itoa(i.port)) lbls[model.AddressLabel] = model.LabelValue(addr) tg.Targets = append(tg.Targets, lbls) diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go index bb5dadcd7b2..a70857a08b2 100644 --- a/discovery/ovhcloud/dedicated_server.go +++ b/discovery/ovhcloud/dedicated_server.go @@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou model.InstanceLabel: model.LabelValue(server.Name), dedicatedServerLabelPrefix + "state": model.LabelValue(server.State), dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange), - dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)), + dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)), dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack), dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)), dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os), dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel), - dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)), + dedicatedServerLabelPrefix + "server_id": 
model.LabelValue(strconv.FormatInt(server.ServerID, 10)), dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse), dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter), dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name), diff --git a/discovery/ovhcloud/ovhcloud_test.go b/discovery/ovhcloud/ovhcloud_test.go index 53ec9b44595..9c95bf90e67 100644 --- a/discovery/ovhcloud/ovhcloud_test.go +++ b/discovery/ovhcloud/ovhcloud_test.go @@ -66,7 +66,7 @@ endpoint: %s _, err := createClient(&conf) - require.ErrorContains(t, err, "missing application key") + require.ErrorContains(t, err, "missing authentication information") } func TestParseIPs(t *testing.T) { diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go index e2d1dee3647..58ceeabd87a 100644 --- a/discovery/ovhcloud/vps.go +++ b/discovery/ovhcloud/vps.go @@ -19,6 +19,7 @@ import ( "net/netip" "net/url" "path" + "strconv" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) { model.InstanceLabel: model.LabelValue(server.Name), vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer), vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)), - vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)), - vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)), + vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)), + vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)), vpsLabelPrefix + "version": model.LabelValue(server.Model.Version), vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name), - vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)), - vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)), + vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)), + vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)), vpsLabelPrefix + "zone": model.LabelValue(server.Zone), vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName), vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster), vpsLabelPrefix + "state": model.LabelValue(server.State), vpsLabelPrefix + "name": model.LabelValue(server.Name), vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode), - vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)), + vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)), vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType), - vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)), + vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)), vpsLabelPrefix + "ipv4": model.LabelValue(ipv4), vpsLabelPrefix + "ipv6": model.LabelValue(ipv6), } diff --git a/discovery/puppetdb/puppetdb.go b/discovery/puppetdb/puppetdb.go index 8c9ccde0a43..8f89acbf936 100644 --- a/discovery/puppetdb/puppetdb.go +++ b/discovery/puppetdb/puppetdb.go @@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { pdbLabelResource: model.LabelValue(resource.Resource), pdbLabelType: model.LabelValue(resource.Type), pdbLabelTitle: model.LabelValue(resource.Title), - pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", 
resource.Exported)), + pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)), pdbLabelFile: model.LabelValue(resource.File), pdbLabelEnvironment: model.LabelValue(resource.Environment), } diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go index 9dd786c804f..2542c632533 100644 --- a/discovery/scaleway/instance.go +++ b/discovery/scaleway/instance.go @@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, labels[instanceTagsLabel] = model.LabelValue(tags) } - if server.IPv6 != nil { - labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) + addr := "" + if server.IPv6 != nil { //nolint:staticcheck + labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck + addr = server.IPv6.Address.String() //nolint:staticcheck } - if server.PublicIP != nil { - labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) + if server.PublicIP != nil { //nolint:staticcheck + labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck + addr = server.PublicIP.Address.String() //nolint:staticcheck } if server.PrivateIP != nil { labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP) + addr = *server.PrivateIP + } - addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10)) + if addr != "" { + addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10)) labels[model.AddressLabel] = model.LabelValue(addr) - targets = append(targets, labels) } } diff --git a/discovery/scaleway/instance_test.go b/discovery/scaleway/instance_test.go index d2449d00c98..ae70a9ed253 100644 --- a/discovery/scaleway/instance_test.go +++ b/discovery/scaleway/instance_test.go @@ -60,7 +60,7 @@ api_url: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 2) + require.Len(t, tg.Targets, 3) for i, lbls := range []model.LabelSet{ { @@ -110,6 +110,28 @@ api_url: %s "__meta_scaleway_instance_type": "DEV1-S", "__meta_scaleway_instance_zone": "fr-par-1", }, + { + "__address__": "51.158.183.115:80", + "__meta_scaleway_instance_boot_type": "local", + "__meta_scaleway_instance_hostname": "routed-dualstack", + "__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a", + "__meta_scaleway_instance_image_arch": "x86_64", + "__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", + "__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish", + "__meta_scaleway_instance_location_cluster_id": "19", + "__meta_scaleway_instance_location_hypervisor_id": "1201", + "__meta_scaleway_instance_location_node_id": "24", + "__meta_scaleway_instance_name": "routed-dualstack", + "__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f", + "__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f", + "__meta_scaleway_instance_public_ipv4": "51.158.183.115", + "__meta_scaleway_instance_region": "nl-ams", + "__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092", + "__meta_scaleway_instance_security_group_name": "Default security group", + "__meta_scaleway_instance_status": "running", + "__meta_scaleway_instance_type": "DEV1-S", + "__meta_scaleway_instance_zone": "nl-ams-1", + }, } { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) diff --git a/discovery/scaleway/testdata/instance.json 
b/discovery/scaleway/testdata/instance.json index f8d35b215cf..b433f7598e7 100644 --- a/discovery/scaleway/testdata/instance.json +++ b/discovery/scaleway/testdata/instance.json @@ -216,6 +216,146 @@ "placement_group": null, "private_nics": [], "zone": "fr-par-1" + }, + { + "id": "4904366a-7e26-4b65-b97b-6392c761247a", + "name": "routed-dualstack", + "arch": "x86_64", + "commercial_type": "DEV1-S", + "boot_type": "local", + "organization": "20b3d507-96ac-454c-a795-bc731b46b12f", + "project": "20b3d507-96ac-454c-a795-bc731b46b12f", + "hostname": "routed-dualstack", + "image": { + "id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "project": "51b656e3-4865-41e8-adbc-0c45bdd780db", + "root_volume": { + "id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "volume_type": "unified", + "size": 10000000000 + }, + "extra_volumes": {}, + "public": true, + "arch": "x86_64", + "creation_date": "2024-02-22T15:52:56.037007+00:00", + "modification_date": "2024-02-22T15:52:56.037007+00:00", + "default_bootscript": null, + "from_server": null, + "state": "available", + "tags": [], + "zone": "nl-ams-1" + }, + "volumes": { + "0": { + "boot": false, + "id": "fe85c817-e67e-4e24-8f13-bde3e9f42620", + "name": "Ubuntu 22.04 Jammy Jellyfish", + "volume_type": "l_ssd", + "export_uri": null, + "organization": "20b3d507-96ac-454c-a795-bc731b46b12f", + "project": "20b3d507-96ac-454c-a795-bc731b46b12f", + "server": { + "id": "4904366a-7e26-4b65-b97b-6392c761247a", + "name": "routed-dualstack" + }, + "size": 20000000000, + "state": "available", + "creation_date": "2024-04-19T14:50:14.019739+00:00", + "modification_date": "2024-04-19T14:50:14.019739+00:00", + "tags": [], + "zone": "nl-ams-1" + } + }, + "tags": [], + "state": "running", + "protected": false, + "state_detail": "booted", + "public_ip": { + "id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0", + "address": "51.158.183.115", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [], + "state": "attached", + "ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e" + }, + "public_ips": [ + { + "id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0", + "address": "51.158.183.115", + "dynamic": false, + "gateway": "62.210.0.1", + "netmask": "32", + "family": "inet", + "provisioning_mode": "dhcp", + "tags": [], + "state": "attached", + "ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e" + }, + { + "id": "f52a8c81-0875-4aee-b96e-eccfc6bec367", + "address": "2001:bc8:1640:1568:dc00:ff:fe21:91b", + "dynamic": false, + "gateway": "fe80::dc00:ff:fe21:91c", + "netmask": "64", + "family": "inet6", + "provisioning_mode": "slaac", + "tags": [], + "state": "attached", + "ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6" + } + ], + "mac_address": "de:00:00:21:09:1b", + "routed_ip_enabled": true, + "ipv6": null, + "extra_networks": [], + "dynamic_ip_required": false, + "enable_ipv6": false, + "private_ip": null, + "creation_date": "2024-04-19T14:50:14.019739+00:00", + "modification_date": "2024-04-19T14:52:21.181670+00:00", + "bootscript": { + "id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe", + "public": true, + "title": "x86_64 mainline 4.4.182 rev1", + "architecture": "x86_64", + "organization": "11111111-1111-4111-8111-111111111111", + "project": "11111111-1111-4111-8111-111111111111", + "kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182", + "dtb": "", + "initrd": 
"http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz", + "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16", + "default": true, + "zone": "nl-ams-1" + }, + "security_group": { + "id": "984414da-9fc2-49c0-a925-fed6266fe092", + "name": "Default security group" + }, + "location": { + "zone_id": "ams1", + "platform_id": "23", + "cluster_id": "19", + "hypervisor_id": "1201", + "node_id": "24" + }, + "maintenances": [], + "allowed_actions": [ + "poweroff", + "terminate", + "reboot", + "stop_in_place", + "backup" + ], + "placement_group": null, + "private_nics": [], + "zone": "nl-ams-1" } ] -} +} \ No newline at end of file diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go index e885ef2e80b..c8af2f15878 100644 --- a/discovery/uyuni/uyuni.go +++ b/discovery/uyuni/uyuni.go @@ -20,6 +20,7 @@ import ( "net/http" "net/url" "path" + "strconv" "strings" "time" @@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels( model.AddressLabel: model.LabelValue(addr), uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname), uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN), - uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)), + uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)), uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)), uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName), uyuniLablelExporter: model.LabelValue(endpoint.ExporterName), diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go index 303c7ca6d03..92904dd71c8 100644 --- a/discovery/zookeeper/zookeeper.go +++ b/discovery/zookeeper/zookeeper.go @@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) { labels := model.LabelSet{} labels[serversetPathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port))) + net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port))) labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) - labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port)) + labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port)) for name, endpoint := range member.AdditionalEndpoints { cleanName := model.LabelName(strutil.SanitizeLabelName(name)) labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue( endpoint.Host) labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue( - fmt.Sprintf("%d", endpoint.Port)) + strconv.Itoa(endpoint.Port)) } labels[serversetStatusLabel] = model.LabelValue(member.Status) @@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) { labels := model.LabelSet{} labels[nervePathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( - net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port))) + net.JoinHostPort(member.Host, strconv.Itoa(member.Port))) labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host) - labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port)) + labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port)) labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name) return labels, nil diff --git 
a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 93eaf251d0b..aa9bf3bfb00 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -48,7 +48,7 @@ The Prometheus monitoring server | <code class="text-nowrap">--rules.alert.for-outage-tolerance</code> | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` | | <code class="text-nowrap">--rules.alert.for-grace-period</code> | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` | | <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` | -| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. Use with server mode only. | `4` | +| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` | | <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` | | <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` | | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 3eceed48f2b..443cd3f0cbf 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -566,6 +566,7 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | +| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` | | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` | | <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -584,7 +585,7 @@ Dump samples from a TSDB. ##### `promtool tsdb dump-openmetrics` -[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped. +[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics. @@ -592,6 +593,7 @@ Dump samples from a TSDB. | Flag | Description | Default | | --- | --- | --- | +| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` | | <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` | | <code class="text-nowrap">--match</code> | Series selector. 
Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 2f2e07a0c25..f0e13cf1366 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -70,6 +70,10 @@ global: # How frequently to evaluate rules. [ evaluation_interval: <duration> | default = 1m ] + + # Offset the rule evaluation timestamp of rule groups by the specified duration into the past to ensure the underlying metrics have been received. + # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there are anomalies with scraping. + [ rule_query_offset: <duration> | default = 0s ] # The labels to add to any time series or alerts when communicating with # external systems (federation, remote storage, Alertmanager). @@ -117,6 +121,12 @@ global: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: <int> | default = 0 ] +runtime: + # Configure the Go garbage collector GOGC parameter. + # See: https://tip.golang.org/doc/gc-guide#GOGC + # Lowering this number increases CPU usage. + [ gogc: <int> | default = 75 ] + # Rule files specifies a list of globs. Rules and alerts are read from # all matching files. rule_files: @@ -1349,7 +1359,7 @@ interface. The following meta labels are available on targets during [relabeling](#relabel_config): * `__meta_openstack_address_pool`: the pool of the private IP. -* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance. +* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance. * `__meta_openstack_instance_id`: the OpenStack instance ID. * `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using. * `__meta_openstack_instance_name`: the OpenStack instance name. @@ -1357,7 +1367,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_openstack_private_ip`: the private IP of the OpenStack instance. * `__meta_openstack_project_id`: the project (tenant) owning this instance. * `__meta_openstack_public_ip`: the public IP of the OpenStack instance. -* `__meta_openstack_tag_<tagkey>`: each tag value of the instance. +* `__meta_openstack_tag_<key>`: each metadata item of the instance, with any unsupported characters converted to an underscore, as shown in the example below. * `__meta_openstack_user_id`: the user account owning the tenant.
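As a minimal sketch only (not part of this patch): a scrape job that keeps just the instances carrying a given metadata item via the renamed `__meta_openstack_tag_<key>` labels, and copies the instance name into a regular target label. The job name, region, and the `env` key are hypothetical assumptions for illustration.

```yaml
scrape_configs:
  - job_name: "openstack-instances"   # hypothetical job name
    openstack_sd_configs:
      - role: instance
        region: RegionOne             # hypothetical region
    relabel_configs:
      # Keep only instances whose "env" metadata item is "prod",
      # exposed as __meta_openstack_tag_env after sanitization.
      - source_labels: [__meta_openstack_tag_env]
        regex: prod
        action: keep
      # Surface the instance name as a regular target label.
      - source_labels: [__meta_openstack_instance_name]
        target_label: instance_name
```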
See below for the configuration options for OpenStack discovery: @@ -1467,6 +1477,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud * `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server * `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server * `__meta_ovhcloud_dedicated_server_name`: the name of the server +* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server * `__meta_ovhcloud_dedicated_server_os`: the operating system of the server * `__meta_ovhcloud_dedicated_server_rack`: the rack of the server * `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server @@ -2952,9 +2963,10 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_scaleway_instance_type`: commercial type of the server * `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction)) -This role uses the private IPv4 address by default. This can be +This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be changed with relabeling, as demonstrated in [the Prometheus scaleway-sd configuration file](/documentation/examples/prometheus-scaleway.yml). +If an instance has no address before relabeling, it will not be added to the target list, and you will not be able to relabel it. #### Baremetal role @@ -3672,7 +3684,8 @@ queue_config: [ min_shards: <int> | default = 1 ] # Maximum number of samples per send. [ max_samples_per_send: <int> | default = 2000] - # Maximum time a sample will wait in buffer. + # Maximum time a sample will wait for a send. The sample might wait less + # if the buffer is full. Further time might pass due to potential retries. [ batch_send_deadline: <duration> | default = 5s ] # Initial retry delay. Gets doubled for every retry. [ min_backoff: <duration> | default = 30ms ] diff --git a/docs/configuration/recording_rules.md b/docs/configuration/recording_rules.md index 48ab951f942..9aa226bbc0b 100644 --- a/docs/configuration/recording_rules.md +++ b/docs/configuration/recording_rules.md @@ -86,6 +86,9 @@ name: <string> # rule can produce. 0 is no limit. [ limit: <int> | default = 0 ] +# Offset the rule evaluation timestamp of this particular group by the specified duration into the past. +[ query_offset: <duration> | default = global.rule_query_offset ] + rules: [ - <rule> ... ] ``` @@ -148,6 +151,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be recorded as an error in the evaluation, and as such no stale markers are written. +# Rule query offset +Setting a rule query offset is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals (see the example below). + # Failed rule evaluations due to slow evaluation If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.
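As a hedged illustration of the group-level `query_offset` field documented above (a sketch, not part of the patched files): a rule group that evaluates two minutes in the past so that late-arriving remote-write samples are already ingested when its rules run. The group name, interval, and recording rule are hypothetical.

```yaml
groups:
  - name: remote-write-aggregations   # hypothetical group name
    interval: 1m
    # Evaluate two minutes in the past so samples that arrive late via
    # remote write are present before the rules run.
    query_offset: 2m
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
```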
diff --git a/docs/querying/api.md b/docs/querying/api.md index 46e79181ed0..71e01b3b952 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -473,6 +473,9 @@ Range vectors are returned as result type `matrix`. The corresponding Each series could have the `"values"` key, or the `"histograms"` key, or both. For a given timestamp, there will only be one sample of either float or histogram type. +Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort) +and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vectors. + ### Instant vectors Instant vectors are returned as result type `vector`. The corresponding @@ -491,6 +494,10 @@ Instant vectors are returned as result type `vector`. The corresponding Each series could have the `"value"` key, or the `"histogram"` key, but not both. +Series are not guaranteed to be returned in any particular order unless a function +such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label) +is used. + ### Scalars Scalar results are returned as result type `scalar`. The corresponding diff --git a/docs/querying/functions.md b/docs/querying/functions.md index c9e65fe6cca..c8fda286551 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -596,10 +596,14 @@ have exactly one element, `scalar` will return `NaN`. `sort(v instant-vector)` returns vector elements sorted by their sample values, in ascending order. Native histograms are sorted by their sum of observations. +Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering. + ## `sort_desc()` Same as `sort`, but sorts in descending order. +Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering. + ## `sort_by_label()` **This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** diff --git a/docs/querying/remote_read_api.md b/docs/querying/remote_read_api.md index e3dd1330690..efbd08e9846 100644 --- a/docs/querying/remote_read_api.md +++ b/docs/querying/remote_read_api.md @@ -5,63 +5,7 @@ sort_rank: 7 # Remote Read API -This is not currently considered part of the stable API and is subject to change -even between non-major version releases of Prometheus. - -## Format overview - -The API response format is JSON. Every successful API request returns a `2xx` -status code. - -Invalid requests that reach the API handlers return a JSON error object -and one of the following HTTP response codes: - -- `400 Bad Request` when parameters are missing or incorrect. -- `422 Unprocessable Entity` when an expression can't be executed - ([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)). -- `503 Service Unavailable` when queries time out or abort. - -Other non-`2xx` codes may be returned for errors occurring before the API -endpoint is reached. - -An array of warnings may be returned if there are errors that do -not inhibit the request execution. All of the data that was successfully -collected will be returned in the data field. - -The JSON response envelope format is as follows: - -``` -{ - "status": "success" | "error", - "data": <data>, - - // Only set if status is "error". The data field may still hold - // additional data.
- "errorType": "<string>", - "error": "<string>", - - // Only if there were warnings while executing the request. - // There will still be data in the data field. - "warnings": ["<string>"] -} -``` - -Generic placeholders are defined as follows: - -* `<rfc3339 | unix_timestamp>`: Input timestamps may be provided either in -[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp -in seconds, with optional decimal places for sub-second precision. Output -timestamps are always represented as Unix timestamps in seconds. -* `<series_selector>`: Prometheus [time series -selectors](basics.md#time-series-selectors) like `http_requests_total` or -`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded. -* `<duration>`: [Prometheus duration strings](basics.md#time_durations). -For example, `5m` refers to a duration of 5 minutes. -* `<bool>`: boolean values (strings `true` and `false`). - -Note: Names of query parameters that may be repeated end with `[]`. - -## Remote Read API +> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus. This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression. The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto). @@ -79,5 +23,3 @@ This returns a message that includes a list of raw samples. These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second. - - diff --git a/docs/storage.md b/docs/storage.md index b4c5b6adade..b66f2062af2 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -84,8 +84,10 @@ or 31 days, whichever is smaller. Prometheus has several flags that configure local storage. The most important are: - `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`. -- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. - Overrides `storage.tsdb.retention` if this flag is set to anything other than default. +- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is + set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention` + nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`. + Supported units: y, w, d, h, m, s, ms. - `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only @@ -195,6 +197,9 @@ or time-series database to Prometheus. To do so, the user must first convert the source data into [OpenMetrics](https://openmetrics.io/) format, which is the input format for the backfilling as described below. +Note that native histograms and staleness markers are not supported by this +procedure, as they cannot be represented in the OpenMetrics format. + ### Usage Backfilling can be used via the Promtool command line. 
Promtool will write the blocks diff --git a/documentation/examples/custom-sd/adapter-usage/main.go b/documentation/examples/custom-sd/adapter-usage/main.go index bfbca7b70cf..8ccbafe6f1c 100644 --- a/documentation/examples/custom-sd/adapter-usage/main.go +++ b/documentation/examples/custom-sd/adapter-usage/main.go @@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target // since the service may be registered remotely through a different node. var addr string if node.ServiceAddress != "" { - addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort)) + addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort)) } else { - addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort)) + addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort)) } target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)} diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index dff988131bb..5278cae0963 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,36 +8,35 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.5 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/common v0.53.0 - github.com/prometheus/prometheus v0.51.1 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/common v0.54.0 + github.com/prometheus/prometheus v0.52.1 github.com/stretchr/testify v1.9.0 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aws/aws-sdk-go v1.50.32 // indirect + github.com/aws/aws-sdk-go v1.51.25 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.2.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -45,33 +44,32 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
// indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - go.opentelemetry.io/collector/featuregate v1.3.0 // indirect - go.opentelemetry.io/collector/pdata v1.3.0 // indirect - go.opentelemetry.io/collector/semconv v0.96.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/collector/featuregate v1.5.0 // indirect + go.opentelemetry.io/collector/pdata v1.5.0 // indirect + go.opentelemetry.io/collector/semconv v0.98.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect + go.opentelemetry.io/otel v1.25.0 // indirect + go.opentelemetry.io/otel/metric v1.25.0 // indirect + go.opentelemetry.io/otel/trace v1.25.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/net v0.22.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/net v0.24.0 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sys v0.19.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect - google.golang.org/grpc v1.62.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.34.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.29.2 // indirect - k8s.io/client-go v0.29.2 // indirect + k8s.io/apimachinery v0.29.3 // indirect + k8s.io/client-go v0.29.3 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect ) diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum index b145f362f4c..9717fceaed6 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,17 +1,17 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity 
v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= -github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= @@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY= -github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls= +github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -35,8 +35,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
 github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU=
-github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
+github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
+github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
-github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
+github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -95,16 +95,16 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
 github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
 github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
 github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
-github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
-github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
+github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
+github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
-github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
+github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -115,10 +115,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -137,8 +135,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
-github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
+github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
@@ -165,12 +163,12 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO
 github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
 github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA=
-github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0=
+github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
+github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928=
-github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA=
+github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
+github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
@@ -194,8 +192,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
-github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
+github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -210,8 +208,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4=
-github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk=
+github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
+github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -219,12 +217,14 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
 github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
-github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
+github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -258,19 +258,19 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
-github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
+github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -279,12 +279,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/prometheus/prometheus v0.51.1 h1:V2e7x2oiUC0Megp26+xjffxBf9EGkyP1iQuGd4VjUSU=
-github.com/prometheus/prometheus v0.51.1/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
+github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
+github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -306,21 +306,20 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
 github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.opentelemetry.io/collector/featuregate v1.3.0 h1:nrFSx+zfjdisjE9oCx25Aep3nJ9RaUjeE1qFL6eovoU=
-go.opentelemetry.io/collector/featuregate v1.3.0/go.mod h1:mm8+xyQfgDmqhyegZRNIQmoKsNnDTwWKFLsdMoXAb7A=
-go.opentelemetry.io/collector/pdata v1.3.0 h1:JRYN7tVHYFwmtQhIYbxWeiKSa2L1nCohyAs8sYqKFZo=
-go.opentelemetry.io/collector/pdata v1.3.0/go.mod h1:t7W0Undtes53HODPdSujPLTnfSR5fzT+WpL+RTaaayo=
-go.opentelemetry.io/collector/semconv v0.96.0 h1:DrZy8BpzJDnN2zFxXRj6BhfGYxNlqpFHBqyuS9fVHRY=
-go.opentelemetry.io/collector/semconv v0.96.0/go.mod h1:zOm/U3pgMIWcvrcnPbR9Xx2HinoXj46ERMK8PUV9wrs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
+go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
+go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
+go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
+go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
+go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
+go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
+go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
+go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
+go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
+go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
+go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -331,16 +330,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -353,24 +350,21 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
+golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -385,22 +379,16 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -409,24 +397,21 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
+golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -434,9 +419,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
+google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -457,16 +441,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
-k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
-k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
-k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
-k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
-k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
+k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
+k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
+k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
+k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
+k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
+k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
 k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
 k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
+k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/documentation/prometheus-mixin/config.libsonnet b/documentation/prometheus-mixin/config.libsonnet
index ab9079a5e3d..70d46a2212b 100644
--- a/documentation/prometheus-mixin/config.libsonnet
+++ b/documentation/prometheus-mixin/config.libsonnet
@@ -44,5 +44,10 @@
       // The default refresh time for all dashboards, default to 60s
       refresh: '60s',
     },
+
+    // Opt-out of multi-cluster dashboards by overriding this.
+    showMultiCluster: true,
+    // The cluster label to infer the cluster name from.
+    clusterLabel: 'cluster',
   },
 }
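The two fields added to config.libsonnet above are the only new knobs. As a rough sketch of how a downstream consumer of the mixin might use them (assuming the conventional mixin.libsonnet entry point that imports this config; the override file and the 'k8s_cluster' label are purely illustrative, not part of this change):

    // override.jsonnet -- hypothetical consumer-side override.
    // Disables the multi-cluster templating added in this change and
    // renames the label the dashboards would otherwise infer clusters from.
    local mixin = (import 'mixin.libsonnet') + {
      _config+:: {
        showMultiCluster: false,      // drop the $cluster template variable
        clusterLabel: 'k8s_cluster',  // only consulted when showMultiCluster is true
      },
    };
    // Evaluating this file with `jsonnet override.jsonnet` emits the dashboard JSON.
    mixin.grafanaDashboards['prometheus.json']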
diff --git a/documentation/prometheus-mixin/dashboards.libsonnet b/documentation/prometheus-mixin/dashboards.libsonnet
index efe53dbac9c..2bdd168cc95 100644
--- a/documentation/prometheus-mixin/dashboards.libsonnet
+++ b/documentation/prometheus-mixin/dashboards.libsonnet
@@ -10,21 +10,32 @@ local template = grafana.template;
 {
   grafanaDashboards+:: {
     'prometheus.json':
-      g.dashboard(
+      local showMultiCluster = $._config.showMultiCluster;
+      local dashboard = g.dashboard(
         '%(prefix)sOverview' % $._config.grafanaPrometheus
-      )
-      .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'cluster')
-      .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
-      .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
+      );
+      local templatedDashboard = if showMultiCluster then
+        dashboard
+        .addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
+        .addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
+        .addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
+      else
+        dashboard
+        .addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
+        .addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
+      templatedDashboard
       .addRow(
         g.row('Prometheus Stats')
         .addPanel(
           g.panel('Prometheus Stats') +
-          g.tablePanel([
+          g.tablePanel(if showMultiCluster then [
             'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
             'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
+          ] else [
+            'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
+            'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
           ], {
-            cluster: { alias: 'Cluster' },
+            cluster: { alias: if showMultiCluster then 'Cluster' else '' },
             job: { alias: 'Job' },
             instance: { alias: 'Instance' },
             version: { alias: 'Version' },
@@ -37,12 +48,18 @@
       g.row('Discovery')
       .addPanel(
         g.panel('Target Sync') +
-        g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3', '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}') +
+        g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
+                     else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
+                     if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
+                     else '{{scrape_job}}') +
         { yaxes: g.yaxes('ms') }
       )
       .addPanel(
         g.panel('Targets') +
-        g.queryPanel('sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})', '{{cluster}}:{{job}}:{{instance}}') +
+        g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
+                     else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
+                     if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
+                     else 'Targets') +
         g.stack
       )
     )
@@ -50,29 +67,47 @@
       g.row('Retrieval')
      .addPanel(
         g.panel('Average Scrape Interval Duration') +
-        g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{cluster}}:{{job}}:{{instance}} {{interval}} configured') +
+        g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
+                     else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
+                     if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
+                     else '{{interval}} configured') +
         { yaxes: g.yaxes('ms') }
      )
      .addPanel(
         g.panel('Scrape failures') +
-        g.queryPanel([
+        g.queryPanel(if showMultiCluster then [
           'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
           'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
           'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
           'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
           'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
-        ], [
+        ] else [
+          'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
+          'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
+          'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
+          'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
+          'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
+        ], if showMultiCluster then [
           'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
           'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
           'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
           'out of bounds: {{cluster}} {{job}} {{instance}}',
           'out of order: {{cluster}} {{job}} {{instance}}',
+        ] else [
+          'exceeded body size limit: {{job}}',
+          'exceeded sample limit: {{job}}',
+          'duplicate timestamp: {{job}}',
+          'out of bounds: {{job}}',
+          'out of order: {{job}}',
         ]) +
         g.stack
      )
      .addPanel(
         g.panel('Appended Samples') +
-        g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])', '{{cluster}} {{job}} {{instance}}') +
+        g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
+                     else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
+                     if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
+                     else '{{job}} {{instance}}') +
         g.stack
      )
     )
@@ -80,12 +115,18 @@
       g.row('Storage')
      .addPanel(
         g.panel('Head Series') +
-        g.queryPanel('prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head series') +
+        g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
+                     else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
+                     if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
+                     else '{{job}} {{instance}} head series') +
         g.stack
      )
      .addPanel(
         g.panel('Head Chunks') +
-        g.queryPanel('prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head chunks') +
+        g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
+                     else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
+                     if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
+                     else '{{job}} {{instance}} head chunks') +
         g.stack
      )
     )
@@ -93,12 +134,18 @@
       g.row('Query')
      .addPanel(
         g.panel('Query Rate') +
-        g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{cluster}} {{job}} {{instance}}') +
+        g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
+                     else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
+                     if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
+                     else '{{job}} {{instance}}') +
         g.stack,
      )
      .addPanel(
         g.panel('Stage Duration') +
-        g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') +
+        g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
+                     else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
+                     if showMultiCluster then '{{slice}}'
+                     else '{{slice}}') +
         { yaxes: g.yaxes('ms') } +
         g.stack,
      )
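Every panel touched above follows the same pattern: the showMultiCluster conditional is resolved while the dashboard JSON is being generated, so exactly one of the two PromQL strings (and one legend) survives into the output; nothing is decided at view time in Grafana. A minimal standalone sketch of that pattern, with the expressions copied from the Targets panel (the expr/legendFormat field names here are illustrative, not the grafonnet API):

    // Evaluated at generation time: with showMultiCluster = false the
    // rendered JSON contains only the single-cluster query and a static legend.
    local showMultiCluster = false;
    {
      expr: if showMultiCluster
            then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
            else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
      legendFormat: if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}' else 'Targets',
    }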
diff --git a/go.mod b/go.mod
index 165540e7f9d..8caf80727b7 100644
--- a/go.mod
+++ b/go.mod
@@ -5,18 +5,18 @@ go 1.21
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
-	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
+	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
 	github.com/Code-Hex/go-generics-cache v1.5.1
-	github.com/KimMachineGun/automemlimit v0.6.0
+	github.com/KimMachineGun/automemlimit v0.6.1
 	github.com/alecthomas/kingpin/v2 v2.4.0
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
-	github.com/aws/aws-sdk-go v1.51.25
+	github.com/aws/aws-sdk-go v1.53.16
 	github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 	github.com/cespare/xxhash/v2 v2.3.0
 	github.com/dennwc/varint v1.0.0
-	github.com/digitalocean/godo v1.113.0
-	github.com/docker/docker v26.0.1+incompatible
+	github.com/digitalocean/godo v1.117.0
+	github.com/docker/docker v26.1.3+incompatible
 	github.com/edsrzf/mmap-go v1.1.0
 	github.com/envoyproxy/go-control-plane v0.12.0
 	github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -29,61 +29,60 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/google/go-cmp v0.6.0
-	github.com/google/pprof v0.0.0-20240416155748-26353dc0451f
+	github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
 	github.com/google/uuid v1.6.0
-	github.com/gophercloud/gophercloud v1.11.0
-	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
+	github.com/gophercloud/gophercloud v1.12.0
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.28.2
-	github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7
-	github.com/hetznercloud/hcloud-go/v2 v2.7.2
+	github.com/hashicorp/consul/api v1.29.1
+	github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
+	github.com/hetznercloud/hcloud-go/v2 v2.9.0
 	github.com/ionos-cloud/sdk-go/v6 v6.1.11
 	github.com/json-iterator/go v1.1.12
 	github.com/klauspost/compress v1.17.8
 	github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-	github.com/linode/linodego v1.32.0
+	github.com/linode/linodego v1.35.0
 	github.com/miekg/dns v1.1.59
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
 	github.com/oklog/run v1.1.0
 	github.com/oklog/ulid v1.3.1
-	github.com/ovh/go-ovh v1.4.3
+	github.com/ovh/go-ovh v1.5.1
 	github.com/prometheus/alertmanager v0.27.0
-	github.com/prometheus/client_golang v1.19.0
+	github.com/prometheus/client_golang v1.19.1
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.53.0
+	github.com/prometheus/common v0.54.0
 	github.com/prometheus/common/assets v0.2.0
 	github.com/prometheus/common/sigv4 v0.1.0
 	github.com/prometheus/exporter-toolkit v0.11.0
-	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26
+	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
 	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
 	github.com/stretchr/testify v1.9.0
 	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/featuregate v1.5.0
-	go.opentelemetry.io/collector/pdata v1.5.0
-	go.opentelemetry.io/collector/semconv v0.98.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0
-	go.opentelemetry.io/otel v1.25.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0
-	go.opentelemetry.io/otel/sdk v1.25.0
-	go.opentelemetry.io/otel/trace v1.25.0
+	go.opentelemetry.io/collector/pdata v1.8.0
+	go.opentelemetry.io/collector/semconv v0.101.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
+	go.opentelemetry.io/otel v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
+	go.opentelemetry.io/otel/sdk v1.27.0
+	go.opentelemetry.io/otel/trace v1.27.0
 	go.uber.org/atomic v1.11.0
 	go.uber.org/automaxprocs v1.5.3
 	go.uber.org/goleak v1.3.0
 	go.uber.org/multierr v1.11.0
-	golang.org/x/net v0.24.0
-	golang.org/x/oauth2 v0.19.0
+	golang.org/x/net v0.26.0
+	golang.org/x/oauth2 v0.21.0
 	golang.org/x/sync v0.7.0
-	golang.org/x/sys v0.19.0
+	golang.org/x/sys v0.21.0
 	golang.org/x/time v0.5.0
-	golang.org/x/tools v0.20.0
-	google.golang.org/api v0.174.0
-	google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be
-	google.golang.org/grpc v1.63.2
-	google.golang.org/protobuf v1.33.0
+	golang.org/x/tools v0.22.0
+	google.golang.org/api v0.183.0
+	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
+	google.golang.org/grpc v1.64.0
+	google.golang.org/protobuf v1.34.1
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.29.3
@@ -94,10 +93,10 @@ require (
-	cloud.google.com/go/auth v0.2.0 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.0 // indirect
+	cloud.google.com/go/auth v0.5.1 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
 	cloud.google.com/go/compute/metadata v0.3.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
@@ -105,7 +104,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cilium/ebpf v0.11.0 // indirect
-	github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
+	github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -115,7 +114,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/fatih/color v1.15.0 // indirect
+	github.com/fatih/color v1.16.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/go-kit/kit v0.12.0 // indirect
@@ -129,7 +128,7 @@ require (
 	github.com/go-openapi/spec v0.20.14 // indirect
 	github.com/go-openapi/swag v0.22.9 // indirect
 	github.com/go-openapi/validate v0.23.0 // indirect
-	github.com/go-resty/resty/v2 v2.12.0 // indirect
+	github.com/go-resty/resty/v2 v2.13.1 // indirect
 	github.com/godbus/dbus/v5 v5.0.4 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/glog v1.2.0 // indirect
@@ -140,9 +139,9 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/hashicorp/cronexpr v1.1.2 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -151,7 +150,6 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
-	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
 	github.com/hashicorp/serf v0.10.1 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
@@ -162,7 +160,7 @@ require (
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
@@ -184,14 +182,14 @@ require (
 	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/otel/metric v1.25.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
-	golang.org/x/crypto v0.22.0 // indirect
+	go.opentelemetry.io/otel/metric v1.27.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/term v0.19.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
+	golang.org/x/mod v0.18.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
diff --git a/go.sum b/go.sum
index 4b8602b053b..06db002f55b 100644
--- a/go.sum
+++ b/go.sum
@@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
 cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.2.0 h1:y6oTcpMSbOcXbwYgUUrvI+mrQ2xbrcdpPgtVbCGTLTk=
-cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU=
-cloud.google.com/go/auth/oauth2adapt v0.2.0 h1:FR8zevgQwu+8CqiOT5r6xCmJa3pJC/wdXEEPF1OkNhA=
-cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k=
+cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
+cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
+cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
+cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -40,10 +40,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@@ -59,8 +59,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
 github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/KimMachineGun/automemlimit v0.6.0 h1:p/BXkH+K40Hax+PuWWPQ478hPjsp9h1CPDhLlA3Z37E=
-github.com/KimMachineGun/automemlimit v0.6.0/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
+github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
+github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
-github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
+github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
-github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
+github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
+github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
-github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -180,8 +180,8 @@ github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
@@ -232,8 +232,8 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC
 github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
 github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
 github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
-github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
-github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
@@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240416155748-26353dc0451f h1:WpZiq8iqvGjJ3m3wzAVKL6+0vz7VkE79iSy9GII00II=
-github.com/google/pprof v0.0.0-20240416155748-26353dc0451f/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
+github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
 github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
-github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
-github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
-github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -343,21 +343,23 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
-github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
+github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
+github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
+github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
+github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8=
-github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
+github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
+github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
 github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
 github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -408,13 +410,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
 github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
 github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -471,8 +473,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
-github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
+github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
+github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -490,8 +492,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= @@ -572,8 +574,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= -github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= +github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -607,8 +609,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -624,8 +626,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -644,13 +646,13 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= @@ -722,30 +724,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM= -go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w= -go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE= -go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw= -go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY= -go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= -go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= -go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0 h1:dT33yIHtmsqpixFsSQPwNeY5drM9wTcoL8h0FWF4oGM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.25.0/go.mod h1:h95q0LBGh7hlAC08X2DhSeyIG02YQ0UyioTCVAqRPmc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0 h1:vOL89uRfOCCNIjkisd0r7SEdJF3ZJFyCNY34fdZs8eU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.25.0/go.mod h1:8GlBGcDk8KKi7n+2S4BT/CPZQYH3erLu0/k64r1MYgo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0 h1:Mbi5PKN7u322woPa85d7ebZ+SOvEoPvoiBu+ryHWgfA= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.25.0/go.mod h1:e7ciERRhZaOZXVjx5MiL8TK5+Xv7G5Gv5PA2ZDEJdL8= -go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= -go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/sdk v1.25.0 h1:PDryEJPC8YJZQSyLY5eqLeafHtG+X7FWnf3aXMtxbqo= -go.opentelemetry.io/otel/sdk v1.25.0/go.mod h1:oFgzCM2zdsxKzz6zwpTZYLLQsFwc+K0daArPdIhuxkw= -go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= -go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= -go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= -go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= +go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= +go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= +go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= +go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -773,9 +773,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -810,8 +810,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -856,17 +856,17 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -946,17 +946,17 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -966,8 +966,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1024,8 +1026,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1045,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.174.0 h1:zB1BWl7ocxfTea2aQ9mgdzXjnfPySllpPOskdnO+q34= -google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1083,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU= -google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1105,8 +1107,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1118,8 +1120,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/model/histogram/float_histogram_test.go b/model/histogram/float_histogram_test.go index 49fb77ab0b7..759da6540c3 100644 --- a/model/histogram/float_histogram_test.go +++ b/model/histogram/float_histogram_test.go @@ -14,9 +14,9 @@ package histogram import ( - "fmt" "math" "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -2134,7 +2134,7 @@ func TestAllFloatBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { var expBuckets, actBuckets []Bucket[float64] if c.includeNeg { @@ -2360,7 +2360,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { var expBuckets, actBuckets []Bucket[float64] if c.includePos { diff --git a/model/histogram/generic.go b/model/histogram/generic.go index 7e1cc4b6052..67abe7b0aaf 100644 --- a/model/histogram/generic.go +++ b/model/histogram/generic.go @@ -422,7 +422,7 @@ func 
getBound(idx, schema int32) float64 { // bucket results in precisely that. It is either frac=1.0 & exp=1024 // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, // by the way, a power of two where the exponent itself is a power of - // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // two, 2¹⁰ in fact, which coincides with a bucket boundary in all // schemas.) So these are the special cases we have to catch below. if schema < 0 { exp := int(idx) << -schema diff --git a/model/histogram/histogram_test.go b/model/histogram/histogram_test.go index 14a948e644e..d1a074135cb 100644 --- a/model/histogram/histogram_test.go +++ b/model/histogram/histogram_test.go @@ -14,8 +14,8 @@ package histogram import ( - "fmt" "math" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -72,7 +72,7 @@ func TestHistogramString(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { actualString := c.histogram.String() require.Equal(t, c.expectedString, actualString) }) @@ -211,7 +211,7 @@ func TestCumulativeBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { it := c.histogram.CumulativeBucketIterator() actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets)) for it.Next() { @@ -371,7 +371,7 @@ func TestRegularBucketIterator(t *testing.T) { } for i, c := range cases { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { it := c.histogram.PositiveBucketIterator() actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets)) for it.Next() { diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index 3d6e7659f4e..6464d007d38 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -17,6 +17,7 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "testing" @@ -732,7 +733,7 @@ func TestScratchBuilder(t *testing.T) { want: FromStrings("ddd", "444"), }, } { - t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { b := NewScratchBuilder(len(tcase.add)) for _, lbl := range tcase.add { b.Add(lbl.Name, lbl.Value) diff --git a/model/labels/matcher.go b/model/labels/matcher.go index 1282f80d639..8e220e392d8 100644 --- a/model/labels/matcher.go +++ b/model/labels/matcher.go @@ -14,7 +14,8 @@ package labels import ( - "fmt" + "bytes" + "strconv" ) // MatchType is an enum for label matching types. @@ -78,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher { } func (m *Matcher) String() string { - return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value) + // Start a buffer with a pre-allocated size on stack to cover most needs. + var bytea [1024]byte + b := bytes.NewBuffer(bytea[:0]) + + if m.shouldQuoteName() { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name)) + } else { + b.WriteString(m.Name) + } + b.WriteString(m.Type.String()) + b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value)) + + return b.String() +} + +func (m *Matcher) shouldQuoteName() bool { + for i, c := range m.Name { + if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') { + continue + } + return true + } + return false } // Matches returns whether the matcher matches the given string value. 
diff --git a/model/labels/matcher_test.go b/model/labels/matcher_test.go index c23deafe610..ff39d40d0f8 100644 --- a/model/labels/matcher_test.go +++ b/model/labels/matcher_test.go @@ -15,6 +15,7 @@ package labels import ( "fmt" + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -225,3 +226,128 @@ func BenchmarkNewMatcher(b *testing.B) { } }) } + +func BenchmarkMatcher_String(b *testing.B) { + type benchCase struct { + name string + matchers []*Matcher + } + cases := []benchCase{ + { + name: "short name equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "foo", "bar"), + MustNewMatcher(MatchEqual, "bar", "baz"), + MustNewMatcher(MatchEqual, "abc", "def"), + MustNewMatcher(MatchEqual, "ghi", "klm"), + MustNewMatcher(MatchEqual, "nop", "qrs"), + }, + }, + { + name: "short quoted name not equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "f.o", "bar"), + MustNewMatcher(MatchEqual, "b.r", "baz"), + MustNewMatcher(MatchEqual, "a.c", "def"), + MustNewMatcher(MatchEqual, "g.i", "klm"), + MustNewMatcher(MatchEqual, "n.p", "qrs"), + }, + }, + { + name: "short quoted name with quotes not equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, `"foo"`, "bar"), + MustNewMatcher(MatchEqual, `"foo"`, "baz"), + MustNewMatcher(MatchEqual, `"foo"`, "def"), + MustNewMatcher(MatchEqual, `"foo"`, "klm"), + MustNewMatcher(MatchEqual, `"foo"`, "qrs"), + }, + }, + { + name: "short name value with quotes equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "foo", `"bar"`), + MustNewMatcher(MatchEqual, "bar", `"baz"`), + MustNewMatcher(MatchEqual, "abc", `"def"`), + MustNewMatcher(MatchEqual, "ghi", `"klm"`), + MustNewMatcher(MatchEqual, "nop", `"qrs"`), + }, + }, + { + name: "short name and long value regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "foo", "five_six_seven_eight_nine_ten_one_two_three_four"), + MustNewMatcher(MatchRegexp, "bar", "one_two_three_four_five_six_seven_eight_nine_ten"), + MustNewMatcher(MatchRegexp, "abc", "two_three_four_five_six_seven_eight_nine_ten_one"), + MustNewMatcher(MatchRegexp, "ghi", "three_four_five_six_seven_eight_nine_ten_one_two"), + MustNewMatcher(MatchRegexp, "nop", "four_five_six_seven_eight_nine_ten_one_two_three"), + }, + }, + { + name: "short name and long value with quotes equal", + matchers: []*Matcher{ + MustNewMatcher(MatchEqual, "foo", `five_six_seven_eight_nine_ten_"one"_two_three_four`), + MustNewMatcher(MatchEqual, "bar", `one_two_three_four_five_six_"seven"_eight_nine_ten`), + MustNewMatcher(MatchEqual, "abc", `two_three_four_five_six_seven_"eight"_nine_ten_one`), + MustNewMatcher(MatchEqual, "ghi", `three_four_five_six_seven_eight_"nine"_ten_one_two`), + MustNewMatcher(MatchEqual, "nop", `four_five_six_seven_eight_nine_"ten"_one_two_three`), + }, + }, + { + name: "long name regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "val"), + MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "val"), + MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "val"), + MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "val"), + MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "val"), + }, + }, + { + name: "long quoted name regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "val"), + MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "val"), 
+ MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "val"), + MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "val"), + MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "val"), + }, + }, + { + name: "long name and long value regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "five_six_seven_eight_nine_ten_one_two_three_four"), + MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "one_two_three_four_five_six_seven_eight_nine_ten"), + MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "two_three_four_five_six_seven_eight_nine_ten_one"), + MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "three_four_five_six_seven_eight_nine_ten_one_two"), + MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "four_five_six_seven_eight_nine_ten_one_two_three"), + }, + }, + { + name: "long quoted name and long value regexp", + matchers: []*Matcher{ + MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "five.six.seven.eight.nine.ten.one.two.three.four"), + MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "one.two.three.four.five.six.seven.eight.nine.ten"), + MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "two.three.four.five.six.seven.eight.nine.ten.one"), + MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "three.four.five.six.seven.eight.nine.ten.one.two"), + MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "four.five.six.seven.eight.nine.ten.one.two.three"), + }, + }, + } + + var mixed []*Matcher + for _, bc := range cases { + mixed = append(mixed, bc.matchers...) + } + rand.Shuffle(len(mixed), func(i, j int) { mixed[i], mixed[j] = mixed[j], mixed[i] }) + cases = append(cases, benchCase{name: "mixed", matchers: mixed}) + + for _, bc := range cases { + b.Run(bc.name, func(b *testing.B) { + for i := 0; i <= b.N; i++ { + m := bc.matchers[i%len(bc.matchers)] + _ = m.String() + } + }) + } +} diff --git a/model/labels/regexp.go b/model/labels/regexp.go index f35dc76f60b..b484e27168f 100644 --- a/model/labels/regexp.go +++ b/model/labels/regexp.go @@ -16,6 +16,7 @@ package labels import ( "slices" "strings" + "unicode/utf8" "github.com/grafana/regexp" "github.com/grafana/regexp/syntax" @@ -827,8 +828,12 @@ type zeroOrOneCharacterStringMatcher struct { } func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { - // Zero or one. - if len(s) > 1 { + // If there's more than one rune in the string, then it can't match. + if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError { + // Size is 0 for empty strings, 1 for invalid rune. + // Empty string matches, invalid rune matches if there isn't anything else. 
+ return size == len(s) + } else if size < len(s) { return false } diff --git a/model/labels/regexp_test.go b/model/labels/regexp_test.go index 3a15b52b401..1db90a473db 100644 --- a/model/labels/regexp_test.go +++ b/model/labels/regexp_test.go @@ -19,6 +19,7 @@ import ( "strings" "testing" "time" + "unicode/utf8" "github.com/grafana/regexp" "github.com/grafana/regexp/syntax" @@ -36,6 +37,7 @@ var ( ".*foo", "^.*foo$", "^.+foo$", + ".?", ".*", ".+", "foo.+", @@ -84,10 +86,16 @@ var ( "foo", " foo bar", "bar", "buzz\nbar", "bar foo", "bfoo", "\n", "\nfoo", "foo\n", "hello foo world", "hello foo\n world", "", "FOO", "Foo", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo", "10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40", - "foofoo0", "foofoo", + "foofoo0", "foofoo", "😀foo0", // Values matching / not matching the test regexps on long alternations. "zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX", + + // Invalid UTF-8 + "\xfefoo", + "foo\xfe", + "\xfd", + "\xff\xff", } ) @@ -926,19 +934,91 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) { } func TestZeroOrOneCharacterStringMatcher(t *testing.T) { + t.Run("match newline", func(t *testing.T) { + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + require.True(t, matcher.Matches("")) + require.True(t, matcher.Matches("x")) + require.True(t, matcher.Matches("\n")) + require.False(t, matcher.Matches("xx")) + require.False(t, matcher.Matches("\n\n")) + }) + + t.Run("do not match newline", func(t *testing.T) { + matcher := &zeroOrOneCharacterStringMatcher{matchNL: false} + require.True(t, matcher.Matches("")) + require.True(t, matcher.Matches("x")) + require.False(t, matcher.Matches("\n")) + require.False(t, matcher.Matches("xx")) + require.False(t, matcher.Matches("\n\n")) + }) + + t.Run("unicode", func(t *testing.T) { + // Just for documentation purposes, emoji1 is 1 rune, emoji2 is 2 runes. + // Keeping this in mind will make it easier for future readers to fix these tests. + emoji1 := "😀" + emoji2 := "❤️" + require.Equal(t, 1, utf8.RuneCountInString(emoji1)) + require.Equal(t, 2, utf8.RuneCountInString(emoji2)) + + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + require.True(t, matcher.Matches(emoji1)) + require.False(t, matcher.Matches(emoji2)) + require.False(t, matcher.Matches(emoji1+emoji1)) + require.False(t, matcher.Matches("x"+emoji1)) + require.False(t, matcher.Matches(emoji1+"x")) + require.False(t, matcher.Matches(emoji1+emoji2)) + }) + + t.Run("invalid unicode", func(t *testing.T) { + // Just for reference, we also compare to what the `^.?$` regular expression matches.
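+ // (The regexp engine decodes an invalid UTF-8 byte as a single utf8.RuneError rune of width one, so `.` consumes exactly one such byte; the two implementations should therefore agree on these inputs.)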
+ re := regexp.MustCompile("^.?$") + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} + + requireMatches := func(s string, expected bool) { + t.Helper() + require.Equal(t, expected, matcher.Matches(s)) + require.Equal(t, re.MatchString(s), matcher.Matches(s)) + } + + requireMatches("\xff", true) + requireMatches("x\xff", false) + requireMatches("\xffx", false) + requireMatches("\xff\xfe", false) + }) +} + +func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) { + type benchCase struct { + str string + matches bool + } + + emoji1 := "😀" + emoji2 := "❤️" + cases := []benchCase{ + {"", true}, + {"x", true}, + {"\n", true}, + {"xx", false}, + {"\n\n", false}, + {emoji1, true}, + {emoji2, false}, + {emoji1 + emoji1, false}, + {strings.Repeat("x", 100), false}, + {strings.Repeat(emoji1, 100), false}, + {strings.Repeat(emoji2, 100), false}, + } + matcher := &zeroOrOneCharacterStringMatcher{matchNL: true} - require.True(t, matcher.Matches("")) - require.True(t, matcher.Matches("x")) - require.True(t, matcher.Matches("\n")) - require.False(t, matcher.Matches("xx")) - require.False(t, matcher.Matches("\n\n")) - - matcher = &zeroOrOneCharacterStringMatcher{matchNL: false} - require.True(t, matcher.Matches("")) - require.True(t, matcher.Matches("x")) - require.False(t, matcher.Matches("\n")) - require.False(t, matcher.Matches("xx")) - require.False(t, matcher.Matches("\n\n")) + b.ResetTimer() + + for n := 0; n < b.N; n++ { + c := cases[n%len(cases)] + got := matcher.Matches(c.str) + if got != c.matches { + b.Fatalf("unexpected result for %q: got %t, want %t", c.str, got, c.matches) + } + } } func TestLiteralPrefixStringMatcher(t *testing.T) { diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index d29c3d07aeb..db2e6ce2ff2 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -17,6 +17,7 @@ import ( "crypto/md5" "encoding/binary" "fmt" + "strconv" "strings" "github.com/grafana/regexp" @@ -48,7 +49,7 @@ const ( Drop Action = "drop" // KeepEqual drops targets for which the input does not match the target. KeepEqual Action = "keepequal" - // Drop drops targets for which the input does match the target. + // DropEqual drops targets for which the input does match the target. DropEqual Action = "dropequal" // HashMod sets a label to the modulus of a hash of labels. HashMod Action = "hashmod" @@ -290,7 +291,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { hash := md5.Sum([]byte(val)) // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. 
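// (md5.Sum yields a [16]byte, so hash[8:] is exactly the 8 bytes that binary.BigEndian.Uint64 consumes, and mod always falls in [0, cfg.Modulus).)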
mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus - lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) + lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10)) case LabelMap: lb.Range(func(l labels.Label) { if cfg.Regex.MatchString(l.Name) { diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 6798fb02a57..507ea101dd5 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -14,7 +14,7 @@ package relabel import ( - "fmt" + "strconv" "testing" "github.com/prometheus/common/model" @@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) { }, } for i, test := range tests { - t.Run(fmt.Sprint(i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { err := test.config.Validate() if test.expected == "" { require.NoError(t, err) diff --git a/model/rulefmt/rulefmt.go b/model/rulefmt/rulefmt.go index 4ed1619d64e..bfb85ce7405 100644 --- a/model/rulefmt/rulefmt.go +++ b/model/rulefmt/rulefmt.go @@ -136,10 +136,11 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { // RuleGroup is a list of sequentially evaluated recording and alerting rules. type RuleGroup struct { - Name string `yaml:"name"` - Interval model.Duration `yaml:"interval,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval,omitempty"` + QueryOffset *model.Duration `yaml:"query_offset,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` } // Rule describes an alerting or recording rule. diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index e7a9243bc2b..d2e72ca33bb 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -74,7 +74,7 @@ func TestHandlerNextBatch(t *testing.T) { for i := range make([]struct{}, 2*maxBatchSize+1) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -186,10 +186,10 @@ func TestHandlerSendAll(t *testing.T) { for i := range make([]struct{}, maxBatchSize) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) expected = append(expected, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -297,23 +297,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) { for i := range make([]struct{}, maxBatchSize/2) { h.queue = append(h.queue, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }, &Alert{ - Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)), }, ) expected1 = append(expected1, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }, &Alert{ - Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)), }, ) expected2 = append(expected2, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -502,7 +502,7 @@ func TestHandlerQueuing(t *testing.T) { var alerts []*Alert for i := range 
make([]struct{}, 20*maxBatchSize) { alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } @@ -762,7 +762,7 @@ func TestHangingNotifier(t *testing.T) { var alerts []*Alert for i := range make([]struct{}, 20*maxBatchSize) { alerts = append(alerts, &Alert{ - Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("alertname", strconv.Itoa(i)), }) } diff --git a/promql/bench_test.go b/promql/bench_test.go index 516b0d74823..9a85290915e 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "context" @@ -23,13 +23,14 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" ) -func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error { +func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, interval, numIntervals int) error { ctx := context.Background() metrics := []labels.Labels{} @@ -249,13 +250,13 @@ func BenchmarkRangeQuery(b *testing.B) { stor := teststorage.New(b) stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings. defer stor.Close() - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. @@ -324,7 +325,7 @@ func BenchmarkNativeHistograms(b *testing.B) { }, } - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 50000000, @@ -338,7 +339,7 @@ func BenchmarkNativeHistograms(b *testing.B) { for _, tc := range cases { b.Run(tc.name, func(b *testing.B) { - ng := NewEngine(opts) + ng := promql.NewEngine(opts) for i := 0; i < b.N; i++ { qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) if err != nil { diff --git a/promql/engine.go b/promql/engine.go index b8a8ea0959f..f9d6f16fc75 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -573,7 +573,8 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { return validationErr } -func (ng *Engine) newTestQuery(f func(context.Context) error) Query { +// NewTestQuery: inject special behaviour into Query for testing. +func (ng *Engine) NewTestQuery(f func(context.Context) error) Query { qry := &query{ q: "test statement", stmt: parser.TestStmt(f), @@ -751,6 +752,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval case parser.ValueTypeScalar: return Scalar{V: mat[0].Floats[0].F, T: start}, warnings, nil case parser.ValueTypeMatrix: + ng.sortMatrixResult(ctx, query, mat) return mat, warnings, nil default: panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type())) @@ -789,11 +791,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval } // TODO(fabxc): where to ensure metric labels are a copy from the storage internals. 
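+ // The sort now happens in sortMatrixResult (below), shared with the instant-query matrix branch earlier in this function.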
+ ng.sortMatrixResult(ctx, query, mat) + + return mat, warnings, nil +} + +func (ng *Engine) sortMatrixResult(ctx context.Context, query *query, mat Matrix) { sortSpanTimer, _ := query.stats.GetSpanTimer(ctx, stats.ResultSortTime, ng.metrics.queryResultSort) sort.Sort(mat) sortSpanTimer.Finish() - - return mat, warnings, nil } // subqueryTimes returns the sum of offsets and ranges of all subqueries in the path. @@ -2024,25 +2030,21 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec vec := make(Vector, 0, len(vs.Series)) for i, s := range vs.Series { it := seriesIterators[i] - t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) - if ok { - vec = append(vec, Sample{ - Metric: s.Labels(), - T: t, - F: f, - H: h, - }) - histSize := 0 - if h != nil { - histSize := h.Size() / 16 // 16 bytes per sample. - ev.currentSamples += histSize - } - ev.currentSamples++ + t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + if !ok { + continue + } - ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, int64(1+histSize)) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + // Note that we ignore the sample values because the timestamp() call only cares about the timestamp. + vec = append(vec, Sample{ + Metric: s.Labels(), + T: t, + }) + + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) } } ev.samplesStats.UpdatePeak(ev.currentSamples) diff --git a/promql/engine_internal_test.go b/promql/engine_internal_test.go new file mode 100644 index 00000000000..cb501b2fdf0 --- /dev/null +++ b/promql/engine_internal_test.go @@ -0,0 +1,82 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "errors" + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/util/annotations" +) + +func TestRecoverEvaluatorRuntime(t *testing.T) { + var output []interface{} + logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { + output = append(output, keyvals...) + return nil + })) + ev := &evaluator{logger: logger} + + expr, _ := parser.ParseExpr("sum(up)") + + var err error + + defer func() { + require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") + require.Contains(t, output, "sum(up)") + }() + defer ev.recover(expr, nil, &err) + + // Cause a runtime panic.
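+ // Indexing the nil slice below panics with "runtime error: index out of range [123] with length 0", which matches the message the deferred require.EqualError above expects.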
+ var a []int + a[123] = 1 +} + +func TestRecoverEvaluatorError(t *testing.T) { + ev := &evaluator{logger: log.NewNopLogger()} + var err error + + e := errors.New("custom error") + + defer func() { + require.EqualError(t, err, e.Error()) + }() + defer ev.recover(nil, nil, &err) + + panic(e) +} + +func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { + ev := &evaluator{logger: log.NewNopLogger()} + var err error + var ws annotations.Annotations + + warnings := annotations.New().Add(errors.New("custom warning")) + e := errWithWarnings{ + err: errors.New("custom error"), + warnings: warnings, + } + + defer func() { + require.EqualError(t, err, e.Error()) + require.Equal(t, warnings, ws, "wrong warning message") + }() + defer ev.recover(nil, &ws, &err) + + panic(e) +} diff --git a/promql/engine_test.go b/promql/engine_test.go index 0202c15ae13..69d9ea0361f 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "context" @@ -20,27 +20,34 @@ import ( "math" "os" "sort" + "strconv" + "sync" "testing" "time" - "github.com/go-kit/log" - "github.com/stretchr/testify/require" "go.uber.org/goleak" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) +const ( + env = "query execution" + defaultLookbackDelta = 5 * time.Minute + defaultEpsilon = 0.000001 // Relative error allowed for sample values. +) + func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } @@ -51,10 +58,12 @@ func TestQueryConcurrency(t *testing.T) { dir, err := os.MkdirTemp("", "test_concurrency") require.NoError(t, err) defer os.RemoveAll(dir) - queryTracker := NewActiveQueryTracker(dir, maxConcurrency, nil) - t.Cleanup(queryTracker.Close) + queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil) + t.Cleanup(func() { + require.NoError(t, queryTracker.Close()) + }) - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, @@ -62,7 +71,7 @@ func TestQueryConcurrency(t *testing.T) { ActiveQueryTracker: queryTracker, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -84,9 +93,14 @@ func TestQueryConcurrency(t *testing.T) { return nil } + var wg sync.WaitGroup for i := 0; i < maxConcurrency; i++ { - q := engine.newTestQuery(f) - go q.Exec(ctx) + q := engine.NewTestQuery(f) + wg.Add(1) + go func() { + q.Exec(ctx) + wg.Done() + }() select { case <-processing: // Expected. 
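The hunks above and below add WaitGroup bookkeeping around every Exec goroutine. A condensed sketch of the resulting shape of the test (identifiers are the ones from this diff; only the structure is summarized, and the sketch defers wg.Done where the diff calls it inline):

	var wg sync.WaitGroup
	for i := 0; i < maxConcurrency; i++ {
		q := engine.NewTestQuery(f)
		wg.Add(1)
		go func() {
			defer wg.Done() // Mark this query finished even on early return.
			q.Exec(ctx)
		}()
	}
	// ... assertions run while the queries are parked on the block channel ...
	for i := 0; i < maxConcurrency; i++ {
		block <- struct{}{} // Release one parked query at a time.
	}
	wg.Wait() // Every Exec goroutine has exited before the test returns.

Without the wait, Exec goroutines could outlive the test body and trip the goleak.VerifyTestMain check registered in TestMain at the top of the file.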
@@ -95,8 +109,12 @@ func TestQueryConcurrency(t *testing.T) { } } - q := engine.newTestQuery(f) - go q.Exec(ctx) + q := engine.NewTestQuery(f) + wg.Add(1) + go func() { + q.Exec(ctx) + wg.Done() + }() select { case <-processing: @@ -119,20 +137,37 @@ func TestQueryConcurrency(t *testing.T) { for i := 0; i < maxConcurrency; i++ { block <- struct{}{} } + + wg.Wait() +} + +// contextDone returns an error if the context was canceled or timed out. +func contextDone(ctx context.Context, env string) error { + if err := ctx.Err(); err != nil { + switch { + case errors.Is(err, context.Canceled): + return promql.ErrQueryCanceled(env) + case errors.Is(err, context.DeadlineExceeded): + return promql.ErrQueryTimeout(env) + default: + return err + } + } + return nil } func TestQueryTimeout(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 5 * time.Millisecond, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { time.Sleep(100 * time.Millisecond) return contextDone(ctx, "test statement execution") }) @@ -140,20 +175,20 @@ func TestQueryTimeout(t *testing.T) { res := query.Exec(ctx) require.Error(t, res.Err, "expected timeout error but got none") - var e ErrQueryTimeout + var e promql.ErrQueryTimeout require.ErrorAs(t, res.Err, &e, "expected timeout error but got: %s", res.Err) } -const errQueryCanceled = ErrQueryCanceled("test statement execution") +const errQueryCanceled = promql.ErrQueryCanceled("test statement execution") func TestQueryCancel(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -161,13 +196,13 @@ func TestQueryCancel(t *testing.T) { block := make(chan struct{}) processing := make(chan struct{}) - query1 := engine.newTestQuery(func(ctx context.Context) error { + query1 := engine.NewTestQuery(func(ctx context.Context) error { processing <- struct{}{} <-block return contextDone(ctx, "test statement execution") }) - var res *Result + var res *promql.Result go func() { res = query1.Exec(ctx) @@ -183,7 +218,7 @@ func TestQueryCancel(t *testing.T) { require.Equal(t, errQueryCanceled, res.Err) // Canceling a query before starting it must have no effect. 
- query2 := engine.newTestQuery(func(ctx context.Context) error { + query2 := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) @@ -221,14 +256,14 @@ func (e errSeriesSet) Err() error { return e.err } func (e errSeriesSet) Warnings() annotations.Annotations { return nil } func TestQueryError(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) - errStorage := ErrStorage{errors.New("storage error")} + engine := promql.NewEngine(opts) + errStorage := promql.ErrStorage{errors.New("storage error")} queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil }) @@ -270,7 +305,7 @@ func (h *hintRecordingQuerier) Select(ctx context.Context, sortSeries bool, hint } func TestSelectHintsSetCorrectly(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, @@ -561,11 +596,11 @@ func TestSelectHintsSetCorrectly(t *testing.T) { }, } { t.Run(tc.query, func(t *testing.T) { - engine := NewEngine(opts) + engine := promql.NewEngine(opts) hintsRecorder := &noopHintRecordingQueryable{} var ( - query Query + query promql.Query err error ) ctx := context.Background() @@ -586,13 +621,13 @@ func TestSelectHintsSetCorrectly(t *testing.T) { } func TestEngineShutdown(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) block := make(chan struct{}) @@ -605,13 +640,13 @@ func TestEngineShutdown(t *testing.T) { <-block return contextDone(ctx, "test statement execution") } - query1 := engine.newTestQuery(f) + query1 := engine.NewTestQuery(f) // Stopping the engine must cancel the base context. While executing queries is // still possible, their context is canceled from the beginning and execution should // terminate immediately. - var res *Result + var res *promql.Result go func() { res = query1.Exec(ctx) processing <- struct{}{} @@ -625,7 +660,7 @@ func TestEngineShutdown(t *testing.T) { require.Error(t, res.Err, "expected error on shutdown during query but got none") require.Equal(t, errQueryCanceled, res.Err) - query2 := engine.newTestQuery(func(context.Context) error { + query2 := engine.NewTestQuery(func(context.Context) error { require.FailNow(t, "reached query execution unexpectedly") return nil }) @@ -635,12 +670,12 @@ func TestEngineShutdown(t *testing.T) { res2 := query2.Exec(ctx) require.Error(t, res2.Err, "expected error on querying with canceled context but got none") - var e ErrQueryCanceled + var e promql.ErrQueryCanceled require.ErrorAs(t, res2.Err, &e, "expected cancellation error but got: %s", res2.Err) } func TestEngineEvalStmtTimestamps(t *testing.T) { - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric 1 2 `) @@ -657,13 +692,13 @@ load 10s // Instant queries. 
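// Cases with Interval == 0 run via NewInstantQuery at Start; the rest run via NewRangeQuery over [Start, End] at Interval.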
{ Query: "1", - Result: Scalar{V: 1, T: 1000}, + Result: promql.Scalar{V: 1, T: 1000}, Start: time.Unix(1, 0), }, { Query: "metric", - Result: Vector{ - Sample{ + Result: promql.Vector{ + promql.Sample{ F: 1, T: 1000, Metric: labels.FromStrings("__name__", "metric"), @@ -673,9 +708,9 @@ load 10s }, { Query: "metric[20s]", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -684,9 +719,9 @@ load 10s // Range queries. { Query: "1", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.EmptyLabels(), }, }, @@ -696,9 +731,9 @@ load 10s }, { Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 1000}, {F: 1, T: 2000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -708,9 +743,9 @@ load 10s }, { Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -727,7 +762,7 @@ load 10s for i, c := range cases { t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error - var qry Query + var qry promql.Query engine := newTestEngine() if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) @@ -749,7 +784,7 @@ load 10s } func TestQueryStatistics(t *testing.T) { - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metricWith1SampleEvery10Seconds 1+1x100 metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100 @@ -818,8 +853,8 @@ load 10s { Query: "timestamp(metricWith1HistogramEvery10Seconds)", Start: time.Unix(21, 0), - PeakSamples: 13, // histogram size 12 + 1 extra because of timestamp - TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds + PeakSamples: 2, + TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds TotalSamplesPerStep: stats.TotalSamplesPerStep{ 21000: 1, }, @@ -1116,7 +1151,7 @@ load 10s Start: time.Unix(201, 0), End: time.Unix(220, 0), Interval: 5 * time.Second, - PeakSamples: 16, + PeakSamples: 5, TotalSamples: 4, // 1 sample per query * 4 steps TotalSamplesPerStep: stats.TotalSamplesPerStep{ 201000: 1, @@ -1267,17 +1302,14 @@ load 10s }, } - engine := newTestEngine() - engine.enablePerStepStats = true - origMaxSamples := engine.maxSamplesPerQuery for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - opts := NewPrometheusQueryOpts(true, 0) - engine.maxSamplesPerQuery = origMaxSamples + opts := promql.NewPrometheusQueryOpts(true, 0) + engine := promqltest.NewTestEngine(true, 0, promqltest.DefaultMaxSamplesPerQuery) runQuery := func(expErr error) *stats.Statistics { var err error - var qry Query + var qry promql.Query if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, opts, c.Query, c.Start) } else { @@ -1300,14 +1332,14 @@ load 10s if c.SkipMaxCheck { return } - engine.maxSamplesPerQuery = stats.Samples.PeakSamples - 1 - runQuery(ErrTooManySamples(env)) + engine = 
promqltest.NewTestEngine(true, 0, stats.Samples.PeakSamples-1) + runQuery(promql.ErrTooManySamples(env)) }) } } func TestMaxQuerySamples(t *testing.T) { - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric 1+1x100 bigmetric{a="1"} 1+1x100 @@ -1423,7 +1455,7 @@ load 10s Interval: 5 * time.Second, }, { - // Sample as above but with only 1 part as step invariant. + // Same as above but with only 1 part as step invariant. // Here the peak is caused by the non-step invariant part as it touches more time range. // Hence at peak it is 2*21 (subquery from 0s to 20s) // + 11 (buffer of a series per evaluation) @@ -1456,7 +1488,7 @@ load 10s engine := newTestEngine() testFunc := func(expError error) { var err error - var qry Query + var qry promql.Query if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { @@ -1474,19 +1506,19 @@ load 10s } // Within limit. - engine.maxSamplesPerQuery = c.MaxSamples + engine = promqltest.NewTestEngine(false, 0, c.MaxSamples) testFunc(nil) // Exceeding limit. - engine.maxSamplesPerQuery = c.MaxSamples - 1 - testFunc(ErrTooManySamples(env)) + engine = promqltest.NewTestEngine(false, 0, c.MaxSamples-1) + testFunc(promql.ErrTooManySamples(env)) }) } } func TestAtModifier(t *testing.T) { engine := newTestEngine() - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 metric{job="2"} 0+2x1000 @@ -1530,137 +1562,137 @@ load 1ms { // Time of the result is the evaluation time. query: `metric_neg @ 0`, start: 100, - result: Vector{ - Sample{F: 1, T: 100000, Metric: lblsneg}, + result: promql.Vector{ + promql.Sample{F: 1, T: 100000, Metric: lblsneg}, }, }, { query: `metric_neg @ -200`, start: 100, - result: Vector{ - Sample{F: 201, T: 100000, Metric: lblsneg}, + result: promql.Vector{ + promql.Sample{F: 201, T: 100000, Metric: lblsneg}, }, }, { query: `metric{job="2"} @ 50`, start: -2, end: 2, interval: 1, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: -2000}, {F: 10, T: -1000}, {F: 10, T: 0}, {F: 10, T: 1000}, {F: 10, T: 2000}}, Metric: lbls2, }, }, }, { // Timestamps for a matrix selector do not depend on the evaluation time.
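// With `metric[20s] @ 300` the selector always reads the fixed window [280s, 300s], so the same three points come back whether the query is evaluated at t=10s or anywhere else.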
query: "metric[20s] @ 300", start: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 28, T: 280000}, {F: 29, T: 290000}, {F: 30, T: 300000}}, Metric: lbls1, }, - Series{ - Floats: []FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 56, T: 280000}, {F: 58, T: 290000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, }, { query: `metric_neg[2s] @ 0`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: -2000}, {F: 2, T: -1000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, }, { query: `metric_neg[3s] @ -500`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 504, T: -503000}, {F: 503, T: -502000}, {F: 502, T: -501000}, {F: 501, T: -500000}}, Metric: lblsneg, }, }, }, { query: `metric_ms[3ms] @ 2.345`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2342, T: 2342}, {F: 2343, T: 2343}, {F: 2344, T: 2344}, {F: 2345, T: 2345}}, Metric: lblsms, }, }, }, { query: "metric[100s:25s] @ 300", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 20, T: 200000}, {F: 22, T: 225000}, {F: 25, T: 250000}, {F: 27, T: 275000}, {F: 30, T: 300000}}, Metric: lbls1, }, - Series{ - Floats: []FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 40, T: 200000}, {F: 44, T: 225000}, {F: 50, T: 250000}, {F: 54, T: 275000}, {F: 60, T: 300000}}, Metric: lbls2, }, }, }, { query: "metric_neg[50s:25s] @ 0", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 51, T: -50000}, {F: 26, T: -25000}, {F: 1, T: 0}}, Metric: lblsneg, }, }, }, { query: "metric_neg[50s:25s] @ -100", start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 151, T: -150000}, {F: 126, T: -125000}, {F: 101, T: -100000}}, Metric: lblsneg, }, }, }, { query: `metric_ms[100ms:25ms] @ 2.345`, start: 100, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2250, T: 2250}, {F: 2275, T: 2275}, {F: 2300, T: 2300}, {F: 2325, T: 2325}}, Metric: lblsms, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ 100))`, start: 50, end: 80, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 80000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 995, T: 50000}, {F: 994, T: 60000}, {F: 993, T: 70000}, {F: 992, T: 
80000}}, Metric: lblstopk3, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ 5000))`, start: 50, end: 80, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10, T: 50000}, {F: 12, T: 60000}, {F: 14, T: 70000}, {F: 16, T: 80000}}, Metric: lblstopk2, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ end()))`, start: 70, end: 100, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 993, T: 70000}, {F: 992, T: 80000}, {F: 991, T: 90000}, {F: 990, T: 100000}}, Metric: lblstopk3, }, }, }, { query: `metric_topk and topk(1, sum_over_time(metric_topk[50s] @ start()))`, start: 100, end: 130, interval: 10, - result: Matrix{ - Series{ - Floats: []FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 990, T: 100000}, {F: 989, T: 110000}, {F: 988, T: 120000}, {F: 987, T: 130000}}, Metric: lblstopk3, }, }, @@ -1669,9 +1701,9 @@ load 1ms // The trick here is that the query range should be > lookback delta. query: `timestamp(metric_timestamp @ 3600)`, start: 0, end: 7 * 60, interval: 60, - result: Matrix{ - Series{ - Floats: []FPoint{ + result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{ {F: 3600, T: 0}, {F: 3600, T: 60 * 1000}, {F: 3600, T: 2 * 60 * 1000}, @@ -1694,7 +1726,7 @@ load 1ms } start, end, interval := time.Unix(c.start, 0), time.Unix(c.end, 0), time.Duration(c.interval)*time.Second var err error - var qry Query + var qry promql.Query if c.end == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.query, start) } else { @@ -1704,76 +1736,19 @@ load 1ms res := qry.Exec(context.Background()) require.NoError(t, res.Err) - if expMat, ok := c.result.(Matrix); ok { + if expMat, ok := c.result.(promql.Matrix); ok { sort.Sort(expMat) - sort.Sort(res.Value.(Matrix)) + sort.Sort(res.Value.(promql.Matrix)) } testutil.RequireEqual(t, c.result, res.Value, "query %q failed", c.query) }) } } -func TestRecoverEvaluatorRuntime(t *testing.T) { - var output []interface{} - logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { - output = append(output, keyvals...) - return nil - })) - ev := &evaluator{logger: logger} - - expr, _ := parser.ParseExpr("sum(up)") - - var err error - - defer func() { - require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") - require.Contains(t, output, "sum(up)") - }() - defer ev.recover(expr, nil, &err) - - // Cause a runtime panic. 
- var a []int - a[123] = 1 -} - -func TestRecoverEvaluatorError(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} - var err error - - e := errors.New("custom error") - - defer func() { - require.EqualError(t, err, e.Error()) - }() - defer ev.recover(nil, nil, &err) - - panic(e) -} - -func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { - ev := &evaluator{logger: log.NewNopLogger()} - var err error - var ws annotations.Annotations - - warnings := annotations.New().Add(errors.New("custom warning")) - e := errWithWarnings{ - err: errors.New("custom error"), - warnings: warnings, - } - - defer func() { - require.EqualError(t, err, e.Error()) - require.Equal(t, warnings, ws, "wrong warning message") - }() - defer ev.recover(nil, &ws, &err) - - panic(e) -} - func TestSubquerySelector(t *testing.T) { type caseType struct { Query string - Result Result + Result promql.Result Start time.Time } @@ -1787,11 +1762,11 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { Query: "metric[20s:10s]", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1801,11 +1776,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s]", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1815,11 +1790,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 2s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1829,11 +1804,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 6s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 1, T: 5000}, {F: 2, T: 10000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1843,11 +1818,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 4s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1857,11 +1832,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 5s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1871,11 +1846,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: 
"metric[20s:5s] offset 6s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1885,11 +1860,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: "metric[20s:5s] offset 7s", - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -1908,11 +1883,11 @@ func TestSubquerySelector(t *testing.T) { cases: []caseType{ { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 9990, T: 9990000}, {F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1922,11 +1897,11 @@ func TestSubquerySelector(t *testing.T) { }, { // Default step. Query: `http_requests{group=~"pro.*",instance="0"}[5m:]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 9840, T: 9840000}, {F: 9900, T: 9900000}, {F: 9960, T: 9960000}, {F: 130, T: 10020000}, {F: 310, T: 10080000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1936,11 +1911,11 @@ func TestSubquerySelector(t *testing.T) { }, { // Checking if high offset (>LookbackDelta) is being taken care of. 
Query: `http_requests{group=~"pro.*",instance="0"}[5m:] offset 20m`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 8640, T: 8640000}, {F: 8700, T: 8700000}, {F: 8760, T: 8760000}, {F: 8820, T: 8820000}, {F: 8880, T: 8880000}}, Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), }, }, @@ -1950,23 +1925,23 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `rate(http_requests[1m])[15s:5s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), }, - Series{ - Floats: []FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), }, - Series{ - Floats: []FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), }, - Series{ - Floats: []FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), }, }, @@ -1976,11 +1951,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -1990,11 +1965,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `sum(http_requests)[40s:10s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 800, T: 80000}, {F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -2004,11 +1979,11 @@ func TestSubquerySelector(t *testing.T) { }, { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, - Result: Result{ + Result: promql.Result{ nil, - Matrix{ - Series{ - Floats: []FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 100000}, {F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, 
T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -2021,7 +1996,7 @@ func TestSubquerySelector(t *testing.T) { } { t.Run("", func(t *testing.T) { engine := newTestEngine() - storage := LoadedStorage(t, tst.loadString) + storage := promqltest.LoadedStorage(t, tst.loadString) t.Cleanup(func() { storage.Close() }) for _, c := range tst.cases { @@ -2031,7 +2006,7 @@ func TestSubquerySelector(t *testing.T) { res := qry.Exec(context.Background()) require.Equal(t, c.Result.Err, res.Err) - mat := res.Value.(Matrix) + mat := res.Value.(promql.Matrix) sort.Sort(mat) testutil.RequireEqual(t, c.Result.Value, mat) }) @@ -2042,7 +2017,7 @@ func TestSubquerySelector(t *testing.T) { func TestTimestampFunction_StepsMoreOftenThanSamples(t *testing.T) { engine := newTestEngine() - storage := LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m metric 0+1x1000 `) @@ -2054,20 +2029,20 @@ load 1m interval := time.Second // We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. - expectedPoints := []FPoint{} + expectedPoints := []promql.FPoint{} for t := 0; t <= 59; t++ { - expectedPoints = append(expectedPoints, FPoint{F: 0, T: int64(t * 1000)}) + expectedPoints = append(expectedPoints, promql.FPoint{F: 0, T: int64(t * 1000)}) } expectedPoints = append( expectedPoints, - FPoint{F: 60, T: 60_000}, - FPoint{F: 60, T: 61_000}, + promql.FPoint{F: 60, T: 60_000}, + promql.FPoint{F: 60, T: 61_000}, ) - expectedResult := Matrix{ - Series{ + expectedResult := promql.Matrix{ + promql.Series{ Floats: expectedPoints, Metric: labels.EmptyLabels(), }, @@ -2104,25 +2079,25 @@ func (f *FakeQueryLogger) Log(l ...interface{}) error { } func TestQueryLogger_basic(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) queryExec := func() { ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) res := query.Exec(ctx) require.NoError(t, res.Err) } - // Query works without query log initialized. + // A Query works without a query log initialized.
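+ // The first queryExec below runs before any FakeQueryLogger exists, which is exactly the uninitialized case.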
queryExec() f1 := NewFakeQueryLogger() @@ -2155,21 +2130,21 @@ func TestQueryLogger_basic(t *testing.T) { } func TestQueryLogger_fields(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return contextDone(ctx, "test statement execution") }) @@ -2184,22 +2159,22 @@ func TestQueryLogger_fields(t *testing.T) { } func TestQueryLogger_error(t *testing.T) { - opts := EngineOpts{ + opts := promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: 10, Timeout: 10 * time.Second, } - engine := NewEngine(opts) + engine := promql.NewEngine(opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) ctx, cancelCtx := context.WithCancel(context.Background()) - ctx = NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) + ctx = promql.NewOriginContext(ctx, map[string]interface{}{"foo": "bar"}) defer cancelCtx() testErr := errors.New("failure") - query := engine.newTestQuery(func(ctx context.Context) error { + query := engine.NewTestQuery(func(ctx context.Context) error { return testErr }) @@ -3005,7 +2980,7 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { t.Run(test.input, func(t *testing.T) { expr, err := parser.ParseExpr(test.input) require.NoError(t, err) - expr = PreprocessExpr(expr, startTime, endTime) + expr = promql.PreprocessExpr(expr, startTime, endTime) if test.outputTest { require.Equal(t, test.input, expr.String(), "error on input '%s'", test.input) } @@ -3016,64 +2991,64 @@ func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { func TestEngineOptsValidation(t *testing.T) { cases := []struct { - opts EngineOpts + opts promql.EngineOpts query string fail bool expError error }{ { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ 100", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ 100", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ 100)", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ 100)", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ 100)", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ 100)", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ start()", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ start()", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ start())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ start())", fail: true, 
expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ start())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ start())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "metric @ end()", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "metric @ end()", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1m] @ end())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1m] @ end())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: false}, - query: "rate(metric[1h:1m] @ end())", fail: true, expError: ErrValidationAtModifierDisabled, + opts: promql.EngineOpts{EnableAtModifier: false}, + query: "rate(metric[1h:1m] @ end())", fail: true, expError: promql.ErrValidationAtModifierDisabled, }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "metric @ 100", }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "rate(metric[1m] @ start())", }, { - opts: EngineOpts{EnableAtModifier: true}, + opts: promql.EngineOpts{EnableAtModifier: true}, query: "rate(metric[1h:1m] @ end())", }, { - opts: EngineOpts{EnableNegativeOffset: false}, - query: "metric offset -1s", fail: true, expError: ErrValidationNegativeOffsetDisabled, + opts: promql.EngineOpts{EnableNegativeOffset: false}, + query: "metric offset -1s", fail: true, expError: promql.ErrValidationNegativeOffsetDisabled, }, { - opts: EngineOpts{EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableNegativeOffset: true}, query: "metric offset -1s", }, { - opts: EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, query: "metric @ 100 offset -2m", }, { - opts: EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, + opts: promql.EngineOpts{EnableAtModifier: true, EnableNegativeOffset: true}, query: "metric offset -2m @ 100", }, } for _, c := range cases { - eng := NewEngine(c.opts) + eng := promql.NewEngine(c.opts) _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { @@ -3101,9 +3076,9 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -3116,9 +3091,9 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 0 0 0 0`, Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}}, Metric: labels.EmptyLabels(), }, }, @@ -3131,9 +3106,9 @@ 
func TestRangeQuery(t *testing.T) { Load: `load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000`, Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 0, T: 0}, {F: 11, T: 60000}, {F: 1100, T: 120000}, {F: 110000, T: 180000}, {F: 11000000, T: 240000}}, Metric: labels.EmptyLabels(), }, }, @@ -3146,9 +3121,9 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s bar 5 17 42 2 7 905 51`, Query: "sum_over_time(bar[30s])", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 5, T: 0}, {F: 59, T: 60000}, {F: 9, T: 120000}, {F: 956, T: 180000}}, Metric: labels.EmptyLabels(), }, }, @@ -3161,9 +3136,9 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x4`, Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -3176,9 +3151,9 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s metric 1+1x8`, Query: "metric", - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings("__name__", "metric"), }, }, @@ -3192,16 +3167,16 @@ func TestRangeQuery(t *testing.T) { foo{job="1"} 1+1x4 bar{job="2"} 1+1x4`, Query: `foo > 2 or bar`, - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 0}, {F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings( "__name__", "bar", "job", "2", ), }, - Series{ - Floats: []FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 60000}, {F: 5, T: 120000}}, Metric: labels.FromStrings( "__name__", "foo", "job", "1", @@ -3217,9 +3192,9 @@ func TestRangeQuery(t *testing.T) { Load: `load 30s requests{job="1", __address__="bar"} 100`, Query: `requests * 2`, - Result: Matrix{ - Series{ - Floats: []FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}}, + Result: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 200, T: 0}, {F: 200, T: 60000}, {F: 200, T: 120000}}, Metric: labels.FromStrings( "__address__", "bar", "job", "1", @@ -3234,7 +3209,7 @@ func TestRangeQuery(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { engine := newTestEngine() - storage := LoadedStorage(t, c.Load) + storage := promqltest.LoadedStorage(t, c.Load) t.Cleanup(func() { storage.Close() }) qry, err := engine.NewRangeQuery(context.Background(), storage, nil, c.Query, c.Start, c.End, c.Interval) @@ -3247,1073 +3222,79 @@ func TestRangeQuery(t *testing.T) { } } -func TestNativeHistogramRate(t *testing.T) { - // TODO(beorn7): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - app := storage.Appender(context.Background()) - for i, h := range tsdbutil.GenerateTestHistograms(100) { - _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), h, nil) - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("rate(%s[45s])", seriesName) - t.Run("instant_query", func(t *testing.T) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.2266666666666663, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) - }) - - t.Run("range_query", func(t *testing.T) { - step := 30 * time.Second - start := timestamp.Time(int64(5 * time.Minute / time.Millisecond)) - end := start.Add(step) - qry, err := engine.NewRangeQuery(context.Background(), storage, nil, queryString, start, end, step) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - matrix, err := res.Matrix() - require.NoError(t, err) - require.Len(t, matrix, 1) - require.Len(t, matrix[0].Histograms, 2) - actualHistograms := matrix[0].Histograms - expectedHistograms := []HPoint{{ - T: 300000, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.2266666666666663, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - }, - }, { - T: 330000, - H: &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.2266666666666663, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - }, - }} - require.Equal(t, expectedHistograms, actualHistograms) - }) -} - -func TestNativeFloatHistogramRate(t *testing.T) { - // TODO(beorn7): Integrate histograms into the PromQL testing framework - // and write more tests there. 
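The replacement test that follows seeds its data through promqltest's load script rather than a hand-built appender. As a reminder of the notation (standard promqltest syntax; the series names below are made up for illustration): `0+1x4` expands to the samples 0 1 2 3 4 spaced by the load interval, and `stale` injects a staleness marker.

	storage := promqltest.LoadedStorage(t, `
	load 1m
		my_metric 0+1x4
		my_metric_with_gap 0 1 stale 3
	`)
	t.Cleanup(func() { require.NoError(t, storage.Close()) })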
+func TestInstantQueryWithRangeVectorSelector(t *testing.T) { engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - app := storage.Appender(context.Background()) - for i, fh := range tsdbutil.GenerateTestFloatHistograms(100) { - _, err := app.AppendHistogram(0, lbls, int64(i)*int64(15*time.Second/time.Millisecond), nil, fh) - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("rate(%s[1m])", seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(int64(5*time.Minute/time.Millisecond))) - require.NoError(t, err) - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - vector, err := res.Vector() - require.NoError(t, err) - require.Len(t, vector, 1) - actualHistogram := vector[0].H - expectedHistogram := &histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 1, - ZeroThreshold: 0.001, - ZeroCount: 1. / 15., - Count: 9. / 15., - Sum: 1.226666666666667, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - PositiveBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 2}, {Offset: 1, Length: 2}}, - NegativeBuckets: []float64{1. / 15., 1. / 15., 1. / 15., 1. / 15.}, - } - require.Equal(t, expectedHistogram, actualHistogram) -} - -func TestNativeHistogram_HistogramCountAndSum(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - h := &histogram.Histogram{ - Count: 24, - ZeroCount: 4, - ZeroThreshold: 0.001, - Sum: 100, - Schema: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{2, 1, -2, 3}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{2, 1, -2, 3}, - } - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) { - engine := newTestEngine() - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - lbls := labels.FromStrings("__name__", seriesName) - - ts := int64(10 * time.Minute / time.Millisecond) - app := storage.Appender(context.Background()) - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h, nil) - } - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryString := fmt.Sprintf("histogram_count(%s)", seriesName) - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - require.Len(t, vector, 1) - require.Nil(t, vector[0].H) - if floatHisto { - require.Equal(t, h.ToFloat(nil).Count, vector[0].F) - } else { - require.Equal(t, float64(h.Count), vector[0].F) - } - - queryString = fmt.Sprintf("histogram_sum(%s)", seriesName) - qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res = qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err = res.Vector() - 
 				require.NoError(t, err)
-
-				require.Len(t, vector, 1)
-				require.Nil(t, vector[0].H)
-				if floatHisto {
-					require.Equal(t, h.ToFloat(nil).Sum, vector[0].F)
-				} else {
-					require.Equal(t, h.Sum, vector[0].F)
-				}
-			})
-		}
-	}
-}
-
-func TestNativeHistogram_HistogramStdDevVar(t *testing.T) {
-	// TODO(codesome): Integrate histograms into the PromQL testing framework
-	// and write more tests there.
-	testCases := []struct {
-		name   string
-		h      *histogram.Histogram
-		stdVar float64
-	}{
-		{
-			name: "1, 2, 3, 4 low-res",
-			h: &histogram.Histogram{
-				Count:  4,
-				Sum:    10,
-				Schema: 2,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 1},
-					{Offset: 3, Length: 1},
-					{Offset: 2, Length: 2},
-				},
-				PositiveBuckets: []int64{1, 0, 0, 0},
-			},
-			stdVar: 1.163807968526718, // actual variance: 1.25
-		},
-		{
-			name: "1, 2, 3, 4 hi-res",
-			h: &histogram.Histogram{
-				Count:  4,
-				Sum:    10,
-				Schema: 8,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 1},
-					{Offset: 255, Length: 1},
-					{Offset: 149, Length: 1},
-					{Offset: 105, Length: 1},
-				},
-				PositiveBuckets: []int64{1, 0, 0, 0},
-			},
-			stdVar: 1.2471347737158793, // actual variance: 1.25
-		},
-		{
-			name: "-50, -8, 0, 3, 8, 9, 100",
-			h: &histogram.Histogram{
-				Count:     7,
-				ZeroCount: 1,
-				Sum:       62,
-				Schema:    3,
-				PositiveSpans: []histogram.Span{
-					{Offset: 13, Length: 1},
-					{Offset: 10, Length: 1},
-					{Offset: 1, Length: 1},
-					{Offset: 27, Length: 1},
-				},
-				PositiveBuckets: []int64{1, 0, 0, 0},
-				NegativeSpans: []histogram.Span{
-					{Offset: 24, Length: 1},
-					{Offset: 21, Length: 1},
-				},
-				NegativeBuckets: []int64{1, 0},
-			},
-			stdVar: 1844.4651144196398, // actual variance: 1738.4082
-		},
-		{
-			name: "-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3",
-			h: &histogram.Histogram{
-				Count:     10,
-				ZeroCount: 0,
-				Sum:       -112946,
-				Schema:    0,
-				NegativeSpans: []histogram.Span{
-					{Offset: 2, Length: 3},
-					{Offset: 1, Length: 2},
-					{Offset: 2, Length: 1},
-					{Offset: 3, Length: 1},
-					{Offset: 2, Length: 1},
-				},
-				NegativeBuckets: []int64{1, 0, 0, 0, 0, 2, -2, 0},
-			},
-			stdVar: 759352122.1939945, // actual variance: 882690990
-		},
-		{
-			name: "-10 x10",
-			h: &histogram.Histogram{
-				Count:     10,
-				ZeroCount: 0,
-				Sum:       -100,
-				Schema:    0,
-				NegativeSpans: []histogram.Span{
-					{Offset: 4, Length: 1},
-				},
-				NegativeBuckets: []int64{10},
-			},
-			stdVar: 1.725830020304794, // actual variance: 0
-		},
-		{
-			name: "-50, -8, 0, 3, 8, 9, 100, NaN",
-			h: &histogram.Histogram{
-				Count:     8,
-				ZeroCount: 1,
-				Sum:       math.NaN(),
-				Schema:    3,
-				PositiveSpans: []histogram.Span{
-					{Offset: 13, Length: 1},
-					{Offset: 10, Length: 1},
-					{Offset: 1, Length: 1},
-					{Offset: 27, Length: 1},
-				},
-				PositiveBuckets: []int64{1, 0, 0, 0},
-				NegativeSpans: []histogram.Span{
-					{Offset: 24, Length: 1},
-					{Offset: 21, Length: 1},
-				},
-				NegativeBuckets: []int64{1, 0},
-			},
-			stdVar: math.NaN(),
-		},
-		{
-			name: "-50, -8, 0, 3, 8, 9, 100, +Inf",
-			h: &histogram.Histogram{
-				Count:     7,
-				ZeroCount: 1,
-				Sum:       math.Inf(1),
-				Schema:    3,
-				PositiveSpans: []histogram.Span{
-					{Offset: 13, Length: 1},
-					{Offset: 10, Length: 1},
-					{Offset: 1, Length: 1},
-					{Offset: 27, Length: 1},
-				},
-				PositiveBuckets: []int64{1, 0, 0, 0},
-				NegativeSpans: []histogram.Span{
-					{Offset: 24, Length: 1},
-					{Offset: 21, Length: 1},
-				},
-				NegativeBuckets: []int64{1, 0},
-			},
-			stdVar: math.NaN(),
-		},
-	}
-	for _, tc := range testCases {
-		for _, floatHisto := range []bool{true, false} {
-			t.Run(fmt.Sprintf("%s floatHistogram=%t", tc.name, floatHisto), func(t *testing.T) {
-				engine := newTestEngine()
-				storage := teststorage.New(t)
-				t.Cleanup(func() { storage.Close() })
-
-				seriesName := "sparse_histogram_series"
-				lbls := labels.FromStrings("__name__", seriesName)
-
-				ts := int64(10 * time.Minute / time.Millisecond)
-				app := storage.Appender(context.Background())
-				var err error
-				if floatHisto {
-					_, err = app.AppendHistogram(0, lbls, ts, nil, tc.h.ToFloat(nil))
-				} else {
-					_, err = app.AppendHistogram(0, lbls, ts, tc.h, nil)
-				}
-				require.NoError(t, err)
-				require.NoError(t, app.Commit())
-
-				queryString := fmt.Sprintf("histogram_stdvar(%s)", seriesName)
-				qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
-				require.NoError(t, err)
-
-				res := qry.Exec(context.Background())
-				require.NoError(t, res.Err)
-				vector, err := res.Vector()
-				require.NoError(t, err)
-
-				require.Len(t, vector, 1)
-				require.Nil(t, vector[0].H)
-				require.InEpsilon(t, tc.stdVar, vector[0].F, 1e-12)
-
-				queryString = fmt.Sprintf("histogram_stddev(%s)", seriesName)
-				qry, err = engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
-				require.NoError(t, err)
-
-				res = qry.Exec(context.Background())
-				require.NoError(t, res.Err)
-
-				vector, err = res.Vector()
-				require.NoError(t, err)
-
-				require.Len(t, vector, 1)
-				require.Nil(t, vector[0].H)
-				require.InEpsilon(t, math.Sqrt(tc.stdVar), vector[0].F, 1e-12)
-			})
-		}
-	}
-}
-
-func TestNativeHistogram_HistogramQuantile(t *testing.T) {
-	// TODO(codesome): Integrate histograms into the PromQL testing framework
-	// and write more tests there.
-	type subCase struct {
-		quantile string
-		value    float64
-	}
-
-	cases := []struct {
-		text string
-		// Histogram to test.
-		h *histogram.Histogram
-		// Different quantiles to test for this histogram.
-		subCases []subCase
+	baseT := timestamp.Time(0)
+	storage := promqltest.LoadedStorage(t, `
+		load 1m
+			some_metric{env="1"} 0+1x4
+			some_metric{env="2"} 0+2x4
+			some_metric_with_stale_marker 0 1 stale 3
+	`)
+	t.Cleanup(func() { require.NoError(t, storage.Close()) })
+
+	testCases := map[string]struct {
+		expr     string
+		expected promql.Matrix
+		ts       time.Time
 	}{
-		{
-			text: "all positive buckets with zero bucket",
-			h: &histogram.Histogram{
-				Count:         12,
-				ZeroCount:     2,
-				ZeroThreshold: 0.001,
-				Sum:           100, // Does not matter.
-				Schema:        0,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				PositiveBuckets: []int64{2, 1, -2, 3},
-			},
-			subCases: []subCase{
-				{
-					quantile: "1.0001",
-					value:    math.Inf(1),
-				},
-				{
-					quantile: "1",
-					value:    16,
-				},
+		"matches series with points in range": {
+			expr: "some_metric[1m]",
+			ts:   baseT.Add(2 * time.Minute),
+			expected: promql.Matrix{
 				{
-					quantile: "0.99",
-					value:    15.759999999999998,
-				},
-				{
-					quantile: "0.9",
-					value:    13.600000000000001,
-				},
-				{
-					quantile: "0.6",
-					value:    4.799999999999997,
-				},
-				{
-					quantile: "0.5",
-					value:    1.6666666666666665,
-				},
-				{ // Zero bucket.
-					quantile: "0.1",
-					value:    0.0006000000000000001,
-				},
-				{
-					quantile: "0",
-					value:    0,
-				},
-				{
-					quantile: "-1",
-					value:    math.Inf(-1),
-				},
-			},
-		},
-		{
-			text: "all negative buckets with zero bucket",
-			h: &histogram.Histogram{
-				Count:         12,
-				ZeroCount:     2,
-				ZeroThreshold: 0.001,
-				Sum:           100, // Does not matter.
-				Schema:        0,
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				NegativeBuckets: []int64{2, 1, -2, 3},
-			},
-			subCases: []subCase{
-				{
-					quantile: "1.0001",
-					value:    math.Inf(1),
-				},
-				{ // Zero bucket.
-					quantile: "1",
-					value:    0,
-				},
-				{ // Zero bucket.
-					quantile: "0.99",
-					value:    -6.000000000000048e-05,
-				},
-				{ // Zero bucket.
-					quantile: "0.9",
-					value:    -0.0005999999999999996,
-				},
-				{
-					quantile: "0.5",
-					value:    -1.6666666666666667,
-				},
-				{
-					quantile: "0.1",
-					value:    -13.6,
-				},
-				{
-					quantile: "0",
-					value:    -16,
-				},
-				{
-					quantile: "-1",
-					value:    math.Inf(-1),
-				},
-			},
-		},
-		{
-			text: "both positive and negative buckets with zero bucket",
-			h: &histogram.Histogram{
-				Count:         24,
-				ZeroCount:     4,
-				ZeroThreshold: 0.001,
-				Sum:           100, // Does not matter.
-				Schema:        0,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				PositiveBuckets: []int64{2, 1, -2, 3},
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				NegativeBuckets: []int64{2, 1, -2, 3},
-			},
-			subCases: []subCase{
-				{
-					quantile: "1.0001",
-					value:    math.Inf(1),
-				},
-				{
-					quantile: "1",
-					value:    16,
-				},
-				{
-					quantile: "0.99",
-					value:    15.519999999999996,
-				},
-				{
-					quantile: "0.9",
-					value:    11.200000000000003,
-				},
-				{
-					quantile: "0.7",
-					value:    1.2666666666666657,
-				},
-				{ // Zero bucket.
-					quantile: "0.55",
-					value:    0.0006000000000000005,
-				},
-				{ // Zero bucket.
-					quantile: "0.5",
-					value:    0,
-				},
-				{ // Zero bucket.
-					quantile: "0.45",
-					value:    -0.0005999999999999996,
-				},
-				{
-					quantile: "0.3",
-					value:    -1.266666666666667,
-				},
-				{
-					quantile: "0.1",
-					value:    -11.2,
-				},
-				{
-					quantile: "0.01",
-					value:    -15.52,
-				},
 				{
-					quantile: "0",
-					value:    -16,
+					Metric: labels.FromStrings("__name__", "some_metric", "env", "1"),
+					Floats: []promql.FPoint{
+						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
+						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 2},
+					},
 				},
 				{
-					quantile: "-1",
-					value:    math.Inf(-1),
+					Metric: labels.FromStrings("__name__", "some_metric", "env", "2"),
+					Floats: []promql.FPoint{
+						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 2},
+						{T: timestamp.FromTime(baseT.Add(2 * time.Minute)), F: 4},
+					},
 				},
 			},
 		},
-	}
-
-	engine := newTestEngine()
-	storage := teststorage.New(t)
-	t.Cleanup(func() { storage.Close() })
-	idx := int64(0)
-	for _, floatHisto := range []bool{true, false} {
-		for _, c := range cases {
-			t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
-				seriesName := "sparse_histogram_series"
-				lbls := labels.FromStrings("__name__", seriesName)
-				ts := idx * int64(10*time.Minute/time.Millisecond)
-				app := storage.Appender(context.Background())
-				var err error
-				if floatHisto {
-					_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil))
-				} else {
-					_, err = app.AppendHistogram(0, lbls, ts, c.h, nil)
-				}
-				require.NoError(t, err)
-				require.NoError(t, app.Commit())
-
-				for j, sc := range c.subCases {
-					t.Run(fmt.Sprintf("%d %s", j, sc.quantile), func(t *testing.T) {
-						queryString := fmt.Sprintf("histogram_quantile(%s, %s)", sc.quantile, seriesName)
-						qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
-						require.NoError(t, err)
-
-						res := qry.Exec(context.Background())
-						require.NoError(t, res.Err)
-
-						vector, err := res.Vector()
-						require.NoError(t, err)
-
-						require.Len(t, vector, 1)
-						require.Nil(t, vector[0].H)
-						require.True(t, almostEqual(sc.value, vector[0].F, defaultEpsilon))
-					})
-				}
-				idx++
-			})
-		}
-	}
-}
-
-func TestNativeHistogram_HistogramFraction(t *testing.T) {
-	// TODO(codesome): Integrate histograms into the PromQL testing framework
-	// and write more tests there.
-	type subCase struct {
-		lower, upper string
-		value        float64
-	}
-
-	invariantCases := []subCase{
-		{
-			lower: "42",
-			upper: "3.1415",
-			value: 0,
-		},
-		{
-			lower: "0",
-			upper: "0",
-			value: 0,
-		},
-		{
-			lower: "0.000001",
-			upper: "0.000001",
-			value: 0,
-		},
-		{
-			lower: "42",
-			upper: "42",
-			value: 0,
-		},
-		{
-			lower: "-3.1",
-			upper: "-3.1",
-			value: 0,
-		},
-		{
-			lower: "3.1415",
-			upper: "NaN",
-			value: math.NaN(),
-		},
-		{
-			lower: "NaN",
-			upper: "42",
-			value: math.NaN(),
-		},
-		{
-			lower: "NaN",
-			upper: "NaN",
-			value: math.NaN(),
+		"matches no series": {
+			expr:     "some_nonexistent_metric[1m]",
+			ts:       baseT,
+			expected: promql.Matrix{},
 		},
-		{
-			lower: "-Inf",
-			upper: "+Inf",
-			value: 1,
+		"no samples in range": {
+			expr:     "some_metric[1m]",
+			ts:       baseT.Add(20 * time.Minute),
+			expected: promql.Matrix{},
 		},
-	}
-
-	cases := []struct {
-		text string
-		// Histogram to test.
-		h *histogram.Histogram
-		// Different ranges to test for this histogram.
-		subCases []subCase
-	}{
-		{
-			text:     "empty histogram",
-			h:        &histogram.Histogram{},
-			subCases: []subCase{
+		"metric with stale marker": {
+			expr: "some_metric_with_stale_marker[3m]",
+			ts:   baseT.Add(3 * time.Minute),
+			expected: promql.Matrix{
 				{
-					lower: "3.1415",
-					upper: "42",
-					value: math.NaN(),
-				},
-			},
-		},
-		{
-			text: "all positive buckets with zero bucket",
-			h: &histogram.Histogram{
-				Count:         12,
-				ZeroCount:     2,
-				ZeroThreshold: 0.001,
-				Sum:           100, // Does not matter.
-				Schema:        0,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				PositiveBuckets: []int64{2, 1, -2, 3}, // Abs: 2, 3, 1, 4
-			},
-			subCases: append([]subCase{
-				{
-					lower: "0",
-					upper: "+Inf",
-					value: 1,
-				},
-				{
-					lower: "-Inf",
-					upper: "0",
-					value: 0,
-				},
-				{
-					lower: "-0.001",
-					upper: "0",
-					value: 0,
-				},
-				{
-					lower: "0",
-					upper: "0.001",
-					value: 2. / 12.,
-				},
-				{
-					lower: "0",
-					upper: "0.0005",
-					value: 1. / 12.,
-				},
-				{
-					lower: "0.001",
-					upper: "inf",
-					value: 10. / 12.,
-				},
-				{
-					lower: "-inf",
-					upper: "-0.001",
-					value: 0,
-				},
-				{
-					lower: "1",
-					upper: "2",
-					value: 3. / 12.,
-				},
-				{
-					lower: "1.5",
-					upper: "2",
-					value: 1.5 / 12.,
-				},
-				{
-					lower: "1",
-					upper: "8",
-					value: 4. / 12.,
-				},
-				{
-					lower: "1",
-					upper: "6",
-					value: 3.5 / 12.,
-				},
-				{
-					lower: "1.5",
-					upper: "6",
-					value: 2. / 12.,
-				},
-				{
-					lower: "-2",
-					upper: "-1",
-					value: 0,
-				},
-				{
-					lower: "-2",
-					upper: "-1.5",
-					value: 0,
-				},
-				{
-					lower: "-8",
-					upper: "-1",
-					value: 0,
-				},
-				{
-					lower: "-6",
-					upper: "-1",
-					value: 0,
-				},
-				{
-					lower: "-6",
-					upper: "-1.5",
-					value: 0,
-				},
-			}, invariantCases...),
-		},
-		{
-			text: "all negative buckets with zero bucket",
-			h: &histogram.Histogram{
-				Count:         12,
-				ZeroCount:     2,
-				ZeroThreshold: 0.001,
-				Sum:           100, // Does not matter.
-				Schema:        0,
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				NegativeBuckets: []int64{2, 1, -2, 3},
-			},
-			subCases: append([]subCase{
-				{
-					lower: "0",
-					upper: "+Inf",
-					value: 0,
-				},
-				{
-					lower: "-Inf",
-					upper: "0",
-					value: 1,
-				},
-				{
-					lower: "-0.001",
-					upper: "0",
-					value: 2. / 12.,
-				},
-				{
-					lower: "0",
-					upper: "0.001",
-					value: 0,
-				},
-				{
-					lower: "-0.0005",
-					upper: "0",
-					value: 1. / 12.,
-				},
-				{
-					lower: "0.001",
-					upper: "inf",
-					value: 0,
-				},
-				{
-					lower: "-inf",
-					upper: "-0.001",
-					value: 10. / 12.,
-				},
-				{
-					lower: "1",
-					upper: "2",
-					value: 0,
-				},
-				{
-					lower: "1.5",
-					upper: "2",
-					value: 0,
-				},
-				{
-					lower: "1",
-					upper: "8",
-					value: 0,
-				},
-				{
-					lower: "1",
-					upper: "6",
-					value: 0,
-				},
-				{
-					lower: "1.5",
-					upper: "6",
-					value: 0,
-				},
-				{
-					lower: "-2",
-					upper: "-1",
-					value: 3. / 12.,
-				},
-				{
-					lower: "-2",
-					upper: "-1.5",
-					value: 1.5 / 12.,
-				},
-				{
-					lower: "-8",
-					upper: "-1",
-					value: 4. / 12.,
-				},
-				{
-					lower: "-6",
-					upper: "-1",
-					value: 3.5 / 12.,
-				},
-				{
-					lower: "-6",
-					upper: "-1.5",
-					value: 2. / 12.,
-				},
-			}, invariantCases...),
-		},
-		{
-			text: "both positive and negative buckets with zero bucket",
-			h: &histogram.Histogram{
-				Count:         24,
-				ZeroCount:     4,
-				ZeroThreshold: 0.001,
-				Sum:           100, // Does not matter.
-				Schema:        0,
-				PositiveSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
-				},
-				PositiveBuckets: []int64{2, 1, -2, 3},
-				NegativeSpans: []histogram.Span{
-					{Offset: 0, Length: 2},
-					{Offset: 1, Length: 2},
+					Metric: labels.FromStrings("__name__", "some_metric_with_stale_marker"),
+					Floats: []promql.FPoint{
+						{T: timestamp.FromTime(baseT), F: 0},
+						{T: timestamp.FromTime(baseT.Add(time.Minute)), F: 1},
+						{T: timestamp.FromTime(baseT.Add(3 * time.Minute)), F: 3},
+					},
 				},
-				NegativeBuckets: []int64{2, 1, -2, 3},
 			},
-			subCases: append([]subCase{
-				{
-					lower: "0",
-					upper: "+Inf",
-					value: 0.5,
-				},
-				{
-					lower: "-Inf",
-					upper: "0",
-					value: 0.5,
-				},
-				{
-					lower: "-0.001",
-					upper: "0",
-					value: 2. / 24,
-				},
-				{
-					lower: "0",
-					upper: "0.001",
-					value: 2. / 24.,
-				},
-				{
-					lower: "-0.0005",
-					upper: "0.0005",
-					value: 2. / 24.,
-				},
-				{
-					lower: "0.001",
-					upper: "inf",
-					value: 10. / 24.,
-				},
-				{
-					lower: "-inf",
-					upper: "-0.001",
-					value: 10. / 24.,
-				},
-				{
-					lower: "1",
-					upper: "2",
-					value: 3. / 24.,
-				},
-				{
-					lower: "1.5",
-					upper: "2",
-					value: 1.5 / 24.,
-				},
-				{
-					lower: "1",
-					upper: "8",
-					value: 4. / 24.,
-				},
-				{
-					lower: "1",
-					upper: "6",
-					value: 3.5 / 24.,
-				},
-				{
-					lower: "1.5",
-					upper: "6",
-					value: 2. / 24.,
-				},
-				{
-					lower: "-2",
-					upper: "-1",
-					value: 3. / 24.,
-				},
-				{
-					lower: "-2",
-					upper: "-1.5",
-					value: 1.5 / 24.,
-				},
-				{
-					lower: "-8",
-					upper: "-1",
-					value: 4. / 24.,
-				},
-				{
-					lower: "-6",
-					upper: "-1",
-					value: 3.5 / 24.,
-				},
-				{
-					lower: "-6",
-					upper: "-1.5",
-					value: 2. / 24.,
-				},
-			}, invariantCases...),
 		},
 	}
-	idx := int64(0)
-	for _, floatHisto := range []bool{true, false} {
-		for _, c := range cases {
-			t.Run(fmt.Sprintf("%s floatHistogram=%t", c.text, floatHisto), func(t *testing.T) {
-				engine := newTestEngine()
-				storage := teststorage.New(t)
-				t.Cleanup(func() { storage.Close() })
-
-				seriesName := "sparse_histogram_series"
-				lbls := labels.FromStrings("__name__", seriesName)
-				ts := idx * int64(10*time.Minute/time.Millisecond)
-				app := storage.Appender(context.Background())
-				var err error
-				if floatHisto {
-					_, err = app.AppendHistogram(0, lbls, ts, nil, c.h.ToFloat(nil))
-				} else {
-					_, err = app.AppendHistogram(0, lbls, ts, c.h, nil)
-				}
-				require.NoError(t, err)
-				require.NoError(t, app.Commit())
-
-				for j, sc := range c.subCases {
-					t.Run(fmt.Sprintf("%d %s %s", j, sc.lower, sc.upper), func(t *testing.T) {
-						queryString := fmt.Sprintf("histogram_fraction(%s, %s, %s)", sc.lower, sc.upper, seriesName)
-						qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
-						require.NoError(t, err)
-
-						res := qry.Exec(context.Background())
-						require.NoError(t, res.Err)
-
-						vector, err := res.Vector()
-						require.NoError(t, err)
+	for name, testCase := range testCases {
+		t.Run(name, func(t *testing.T) {
+			q, err := engine.NewInstantQuery(context.Background(), storage, nil, testCase.expr, testCase.ts)
+			require.NoError(t, err)
+			defer q.Close()
-
-						require.Len(t, vector, 1)
-						require.Nil(t, vector[0].H)
-						if math.IsNaN(sc.value) {
-							require.True(t, math.IsNaN(vector[0].F))
-							return
-						}
-						require.Equal(t, sc.value, vector[0].F)
-					})
-				}
-				idx++
-			})
-		}
+			res := q.Exec(context.Background())
+			require.NoError(t, res.Err)
+			testutil.RequireEqual(t, testCase.expected, res.Value)
+		})
 	}
 }
@@ -4444,7 +3425,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
 			_, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42)
 			require.NoError(t, err)
 			for idx1, h := range c.histograms {
-				lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
+				lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
 				// Since we mutate h later, we need to create a copy here.
 				var err error
 				if floatHisto {
@@ -4466,7 +3447,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
 			}
 			require.NoError(t, app.Commit())
 
-			queryAndCheck := func(queryString string, ts int64, exp Vector) {
+			queryAndCheck := func(queryString string, ts int64, exp promql.Vector) {
 				qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
 				require.NoError(t, err)
 
@@ -4490,7 +3471,7 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
 
 			// sum().
 			queryString := fmt.Sprintf("sum(%s)", seriesName)
-			queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
 
 			queryString = `sum({idx="0"})`
 			var annos annotations.Annotations
@@ -4502,26 +3483,26 @@ func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) {
 			for idx := 1; idx < len(c.histograms); idx++ {
 				queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx)
 			}
-			queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
 
 			// count().
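+			// Note (editorial comment, assumption from the expectation below):
+			// count() over native-histogram series yields a plain float sample,
+			// one count per series (4 here), so the check sets F rather than H.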
 			queryString = fmt.Sprintf("count(%s)", seriesName)
-			queryAndCheck(queryString, ts, []Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}})
 
 			// avg().
 			queryString = fmt.Sprintf("avg(%s)", seriesName)
-			queryAndCheck(queryString, ts, []Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
 
 			offset := int64(len(c.histograms) - 1)
 			newTs := ts + offset*int64(time.Minute/time.Millisecond)
 
 			// sum_over_time().
 			queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
-			queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}})
 
 			// avg_over_time().
 			queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset)
-			queryAndCheck(queryString, newTs, []Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}})
 		})
 		idx0++
 	}
@@ -4716,7 +3697,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
 			ts := idx0 * int64(10*time.Minute/time.Millisecond)
 			app := storage.Appender(context.Background())
 			for idx1, h := range c.histograms {
-				lbls := labels.FromStrings("__name__", seriesName, "idx", fmt.Sprintf("%d", idx1))
+				lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1))
 				// Since we mutate h later, we need to create a copy here.
 				var err error
 				if floatHisto {
@@ -4728,7 +3709,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
 			}
 			require.NoError(t, app.Commit())
 
-			queryAndCheck := func(queryString string, exp Vector) {
+			queryAndCheck := func(queryString string, exp promql.Vector) {
 				qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
 				require.NoError(t, err)
 
@@ -4756,7 +3737,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) {
 			for idx := 1; idx < len(c.histograms); idx++ {
 				queryString += fmt.Sprintf(` - ignoring(idx) %s{idx="%d"}`, seriesName, idx)
 			}
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}})
 		})
 	}
 	idx0++
@@ -4886,7 +3867,7 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) {
 			require.NoError(t, err)
 			require.NoError(t, app.Commit())
 
-			queryAndCheck := func(queryString string, exp Vector) {
+			queryAndCheck := func(queryString string, exp promql.Vector) {
 				qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts))
 				require.NoError(t, err)
 
@@ -4901,27 +3882,27 @@ func TestNativeHistogram_MulDivOperator(t *testing.T) {
 
 			// histogram * scalar.
 			queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
 
 			// scalar * histogram.
 			queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
 
 			// histogram * float.
 			queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
 
 			// float * histogram.
 			queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
 
 			// histogram / scalar.
 			queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
 
 			// histogram / float.
 			queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
-			queryAndCheck(queryString, []Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
+			queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
 		})
 		idx0++
 	}
@@ -4991,20 +3972,17 @@ metric 0 1 2
 	for _, c := range cases {
 		c := c
 		t.Run(c.name, func(t *testing.T) {
-			engine := newTestEngine()
-			storage := LoadedStorage(t, load)
+			engine := promqltest.NewTestEngine(false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
+			storage := promqltest.LoadedStorage(t, load)
 			t.Cleanup(func() { storage.Close() })
 
-			if c.engineLookback != 0 {
-				engine.lookbackDelta = c.engineLookback
-			}
-			opts := NewPrometheusQueryOpts(false, c.queryLookback)
+			opts := promql.NewPrometheusQueryOpts(false, c.queryLookback)
 			qry, err := engine.NewInstantQuery(context.Background(), storage, opts, query, c.ts)
 			require.NoError(t, err)
 
 			res := qry.Exec(context.Background())
 			require.NoError(t, res.Err)
-			vec, ok := res.Value.(Vector)
+			vec, ok := res.Value.(promql.Vector)
 			require.True(t, ok)
 			if c.expectSamples {
 				require.NotEmpty(t, vec)
@@ -5014,3 +3992,9 @@ metric 0 1 2
 		})
 	}
 }
+
+func makeInt64Pointer(val int64) *int64 {
+	valp := new(int64)
+	*valp = val
+	return valp
+}
diff --git a/promql/functions.go b/promql/functions.go
index 2e15a14672d..9b3be22874b 100644
--- a/promql/functions.go
+++ b/promql/functions.go
@@ -948,15 +948,6 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
 	return enh.Out, nil
 }
 
-func kahanSum(samples []float64) float64 {
-	var sum, c float64
-
-	for _, v := range samples {
-		sum, c = kahanSumInc(v, sum, c)
-	}
-	return sum + c
-}
-
 func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
 	t := sum + inc
 	// Using Neumaier improvement, swap if next term larger than sum.
diff --git a/promql/functions_test.go b/promql/functions_test.go
index 6d5c3784ea6..aef59c83795 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -11,11 +11,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package promql
+package promql_test
 
 import (
 	"context"
-	"math"
 	"testing"
 	"time"
 
@@ -23,6 +22,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
@@ -33,13 +33,13 @@ func TestDeriv(t *testing.T) {
 	// so we test it by hand.
 	storage := teststorage.New(t)
 	defer storage.Close()
 
-	opts := EngineOpts{
+	opts := promql.EngineOpts{
 		Logger:     nil,
 		Reg:        nil,
 		MaxSamples: 10000,
 		Timeout:    10 * time.Second,
 	}
-	engine := NewEngine(opts)
+	engine := promql.NewEngine(opts)
 
 	a := storage.Appender(context.Background())
 
@@ -70,19 +70,13 @@ func TestDeriv(t *testing.T) {
 
 func TestFunctionList(t *testing.T) {
 	// Test that Functions and parser.Functions list the same functions.
-	for i := range FunctionCalls {
+	for i := range promql.FunctionCalls {
 		_, ok := parser.Functions[i]
 		require.True(t, ok, "function %s exists in promql package, but not in parser package", i)
 	}
 
 	for i := range parser.Functions {
-		_, ok := FunctionCalls[i]
+		_, ok := promql.FunctionCalls[i]
 		require.True(t, ok, "function %s exists in parser package, but not in promql package", i)
 	}
 }
-
-func TestKahanSum(t *testing.T) {
-	vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)}
-	expected := 2.0
-	require.Equal(t, expected, kahanSum(vals))
-}
diff --git a/promql/parser/printer.go b/promql/parser/printer.go
index ff171f2152a..f3bdefdeb1f 100644
--- a/promql/parser/printer.go
+++ b/promql/parser/printer.go
@@ -204,8 +204,8 @@ func (node *VectorSelector) String() string {
 		labelStrings = make([]string, 0, len(node.LabelMatchers)-1)
 	}
 	for _, matcher := range node.LabelMatchers {
-		// Only include the __name__ label if its equality matching and matches the name.
-		if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name {
+		// Only include the __name__ label if it's equality matching and matches the name, but don't skip if it's an explicit empty name matcher.
+		if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name && matcher.Value != "" {
 			continue
 		}
 		labelStrings = append(labelStrings, matcher.String())
diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go
index a044b6969c6..f224d841d0d 100644
--- a/promql/parser/printer_test.go
+++ b/promql/parser/printer_test.go
@@ -135,6 +135,19 @@ func TestExprString(t *testing.T) {
 		{
 			in: `a[1m] @ end()`,
 		},
+		{
+			in: `{__name__="",a="x"}`,
+		},
+		{
+			in: `{"a.b"="c"}`,
+		},
+		{
+			in: `{"0"="1"}`,
+		},
+		{
+			in:  `{"_0"="1"}`,
+			out: `{_0="1"}`,
+		},
 	}
 
 	for _, test := range inputs {
@@ -216,6 +229,16 @@ func TestVectorSelector_String(t *testing.T) {
 			},
 			expected: `{__name__="foobar"}`,
 		},
+		{
+			name: "empty name matcher",
+			vs: VectorSelector{
+				LabelMatchers: []*labels.Matcher{
+					labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, ""),
+					labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
+				},
+			},
+			expected: `{__name__="",a="x"}`,
+		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			require.Equal(t, tc.expected, tc.vs.String())
diff --git a/promql/promql_test.go b/promql/promql_test.go
index 05821b1c11f..7bafc02e3bc 100644
--- a/promql/promql_test.go
+++ b/promql/promql_test.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package promql
+package promql_test
 
 import (
 	"context"
@@ -22,37 +22,30 @@ import (
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/promqltest"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
 
-func newTestEngine() *Engine {
-	return NewEngine(EngineOpts{
-		Logger:                   nil,
-		Reg:                      nil,
-		MaxSamples:               10000,
-		Timeout:                  100 * time.Second,
-		NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
-		EnableAtModifier:         true,
-		EnableNegativeOffset:     true,
-		EnablePerStepStats:       true,
-	})
+func newTestEngine() *promql.Engine {
+	return promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
 }
 
 func TestEvaluations(t *testing.T) {
-	RunBuiltinTests(t, newTestEngine())
+	promqltest.RunBuiltinTests(t, newTestEngine())
 }
 
 // Run a lot of queries at the same time, to check for race conditions.
func TestConcurrentRangeQueries(t *testing.T) {
 	stor := teststorage.New(t)
 	defer stor.Close()
-	opts := EngineOpts{
+	opts := promql.EngineOpts{
 		Logger:     nil,
 		Reg:        nil,
 		MaxSamples: 50000000,
 		Timeout:    100 * time.Second,
 	}
-	engine := NewEngine(opts)
+	engine := promql.NewEngine(opts)
 
 	const interval = 10000 // 10s interval.
 	// A day of data plus 10k steps.
diff --git a/promql/test.go b/promql/promqltest/test.go
similarity index 89%
rename from promql/test.go
rename to promql/promqltest/test.go
index 1cdfe8d311c..1affd91f637 100644
--- a/promql/test.go
+++ b/promql/promqltest/test.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package promql
+package promqltest
 
 import (
 	"context"
@@ -19,7 +19,6 @@ import (
 	"errors"
 	"fmt"
 	"io/fs"
-	"math"
 	"strconv"
 	"strings"
 	"testing"
@@ -33,16 +32,16 @@ import (
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/promql/parser/posrange"
 	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/almost"
 	"github.com/prometheus/prometheus/util/teststorage"
 	"github.com/prometheus/prometheus/util/testutil"
 )
 
 var (
-	minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
-
 	patSpace       = regexp.MustCompile("[\t ]+")
 	patLoad        = regexp.MustCompile(`^load\s+(.+?)$`)
 	patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
@@ -50,7 +49,8 @@ var (
 )
 
 const (
-	defaultEpsilon = 0.000001 // Relative error allowed for sample values.
+	defaultEpsilon            = 0.000001 // Relative error allowed for sample values.
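+	// DefaultMaxSamplesPerQuery is the MaxSamples limit for engines built via
+	// NewTestEngine when a test has no special requirements; it matches the
+	// 10000-sample limit the in-package test engine used before this refactor.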
+	DefaultMaxSamplesPerQuery = 10000
 )
 
 var testStartTime = time.Unix(0, 0).UTC()
@@ -72,8 +72,22 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
 	return test.storage
 }
 
+func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine {
+	return promql.NewEngine(promql.EngineOpts{
+		Logger:                   nil,
+		Reg:                      nil,
+		MaxSamples:               maxSamples,
+		Timeout:                  100 * time.Second,
+		NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
+		EnableAtModifier:         true,
+		EnableNegativeOffset:     true,
+		EnablePerStepStats:       enablePerStepStats,
+		LookbackDelta:            lookbackDelta,
+	})
+}
+
 // RunBuiltinTests runs an acceptance test suite against the provided engine.
-func RunBuiltinTests(t *testing.T, engine QueryEngine) {
+func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
 	t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
 	parser.EnableExperimentalFunctions = true
 
@@ -90,11 +104,11 @@ func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
 }
 
 // RunTest parses and runs the test against the provided engine.
-func RunTest(t testutil.T, input string, engine QueryEngine) {
+func RunTest(t testutil.T, input string, engine promql.QueryEngine) {
 	require.NoError(t, runTest(t, input, engine))
 }
 
-func runTest(t testutil.T, input string, engine QueryEngine) error {
+func runTest(t testutil.T, input string, engine promql.QueryEngine) error {
 	test, err := newTest(t, input)
 
 	// Why do this before checking err? newTest() can create the test storage and then return an error,
@@ -368,7 +382,7 @@ func (*evalCmd) testCmd() {}
 type loadCmd struct {
 	gap       time.Duration
 	metrics   map[uint64]labels.Labels
-	defs      map[uint64][]Sample
+	defs      map[uint64][]promql.Sample
 	exemplars map[uint64][]exemplar.Exemplar
 }
 
@@ -376,7 +390,7 @@ func newLoadCmd(gap time.Duration) *loadCmd {
 	return &loadCmd{
 		gap:       gap,
 		metrics:   map[uint64]labels.Labels{},
-		defs:      map[uint64][]Sample{},
+		defs:      map[uint64][]promql.Sample{},
 		exemplars: map[uint64][]exemplar.Exemplar{},
 	}
 }
@@ -389,11 +403,11 @@ func (cmd loadCmd) String() string {
 func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
 	h := m.Hash()
 
-	samples := make([]Sample, 0, len(vals))
+	samples := make([]promql.Sample, 0, len(vals))
 	ts := testStartTime
 	for _, v := range vals {
 		if !v.Omitted {
-			samples = append(samples, Sample{
+			samples = append(samples, promql.Sample{
 				T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
 				F: v.Value,
 				H: v.Histogram,
@@ -419,7 +433,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
 	return nil
 }
 
-func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
+func appendSample(a storage.Appender, s promql.Sample, m labels.Labels) error {
 	if s.H != nil {
 		if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
 			return err
@@ -503,7 +517,7 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
 
 // compareResult compares the result value with the defined expectation.
 func (ev *evalCmd) compareResult(result parser.Value) error {
 	switch val := result.(type) {
-	case Matrix:
+	case promql.Matrix:
 		if ev.ordered {
 			return fmt.Errorf("expected ordered result, but query returned a matrix")
 		}
@@ -521,8 +535,8 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 			seen[hash] = true
 			exp := ev.expected[hash]
 
-			var expectedFloats []FPoint
-			var expectedHistograms []HPoint
+			var expectedFloats []promql.FPoint
+			var expectedHistograms []promql.HPoint
 
 			for i, e := range exp.vals {
 				ts := ev.start.Add(time.Duration(i) * ev.step)
@@ -534,9 +548,9 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 				t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
 
 				if e.Histogram != nil {
-					expectedHistograms = append(expectedHistograms, HPoint{T: t, H: e.Histogram})
+					expectedHistograms = append(expectedHistograms, promql.HPoint{T: t, H: e.Histogram})
 				} else if !e.Omitted {
-					expectedFloats = append(expectedFloats, FPoint{T: t, F: e.Value})
+					expectedFloats = append(expectedFloats, promql.FPoint{T: t, F: e.Value})
 				}
 			}
 
@@ -551,7 +565,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 					return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
 				}
 
-				if !almostEqual(actual.F, expected.F, defaultEpsilon) {
+				if !almost.Equal(actual.F, expected.F, defaultEpsilon) {
 					return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
 				}
 			}
@@ -575,7 +589,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 			}
 		}
 
-	case Vector:
+	case promql.Vector:
 		seen := map[uint64]bool{}
 		for pos, v := range val {
 			fp := v.Metric.Hash()
@@ -601,7 +615,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 			if expH != nil && !expH.Compact(0).Equals(v.H) {
 				return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
 			}
-			if !almostEqual(exp0.Value, v.F, defaultEpsilon) {
+			if !almost.Equal(exp0.Value, v.F, defaultEpsilon) {
 				return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F)
 			}
 
@@ -613,7 +627,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 			}
 		}
 
-	case Scalar:
+	case promql.Scalar:
 		if len(ev.expected) != 1 {
 			return fmt.Errorf("expected vector result, but got scalar %s", val.String())
 		}
@@ -621,7 +635,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 		if exp0.Histogram != nil {
 			return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String())
 		}
-		if !almostEqual(exp0.Value, val.V, defaultEpsilon) {
+		if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
 			return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value)
 		}
 
@@ -631,7 +645,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 	return nil
 }
 
-func formatSeriesResult(s Series) string {
+func formatSeriesResult(s promql.Series) string {
 	floatPlural := "s"
 	histogramPlural := "s"
 
@@ -678,8 +692,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
 	// If there is a subquery, then the selectors inside it don't get the @ timestamp.
 	// If any selector already has the @ timestamp set, then it is untouched.
 	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
-		_, _, subqTs := subqueryTimes(path)
-		if subqTs != nil {
+		if hasAtModifier(path) {
 			// There is a subquery with timestamp in the path,
 			// hence don't change any timestamps further.
 			return nil
@@ -701,7 +714,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
 			}
 
 		case *parser.Call:
-			_, ok := AtModifierUnsafeFunctions[n.Func.Name]
+			_, ok := promql.AtModifierUnsafeFunctions[n.Func.Name]
 			containsNonStepInvariant = containsNonStepInvariant || ok
 		}
 		return nil
@@ -729,8 +742,19 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
 	return testCases, nil
 }
 
+func hasAtModifier(path []parser.Node) bool {
+	for _, node := range path {
+		if n, ok := node.(*parser.SubqueryExpr); ok {
+			if n.Timestamp != nil {
+				return true
+			}
+		}
+	}
+	return false
+}
+
 // exec processes a single step of the test.
-func (t *test) exec(tc testCommand, engine QueryEngine) error {
+func (t *test) exec(tc testCommand, engine promql.QueryEngine) error {
 	switch cmd := tc.(type) {
 	case *clearCmd:
 		t.clear()
@@ -755,7 +779,7 @@ func (t *test) exec(tc testCommand, engine promql.QueryEngine) error {
 	return nil
 }
 
-func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
+func (t *test) execEval(cmd *evalCmd, engine promql.QueryEngine) error {
 	if cmd.isRange {
 		return t.execRangeEval(cmd, engine)
 	}
 
 	return t.execInstantEval(cmd, engine)
 }
 
-func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
+func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
 	q, err := engine.NewRangeQuery(t.context, t.storage, nil, cmd.expr, cmd.start, cmd.end, cmd.step)
 	if err != nil {
 		return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
@@ -788,7 +812,7 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
 	return nil
 }
 
-func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
+func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
 	queries, err := atModifierTestCases(cmd.expr, cmd.start)
 	if err != nil {
 		return err
@@ -830,29 +854,29 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
 			// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
 			continue
 		}
-		mat := rangeRes.Value.(Matrix)
+		mat := rangeRes.Value.(promql.Matrix)
 		if err := assertMatrixSorted(mat); err != nil {
 			return err
 		}
 
-		vec := make(Vector, 0, len(mat))
+		vec := make(promql.Vector, 0, len(mat))
 		for _, series := range mat {
 			// We expect either Floats or Histograms.
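+			// Each series contributes at most one sample to the emulated instant
+			// result: the float or histogram point whose timestamp equals the
+			// query's evaluation time (first match wins, then we break).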
 			for _, point := range series.Floats {
 				if point.T == timeMilliseconds(iq.evalTime) {
-					vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
+					vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
 					break
 				}
 			}
 
 			for _, point := range series.Histograms {
 				if point.T == timeMilliseconds(iq.evalTime) {
-					vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
+					vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
 					break
 				}
 			}
 		}
 
-		if _, ok := res.Value.(Scalar); ok {
-			err = cmd.compareResult(Scalar{V: vec[0].F})
+		if _, ok := res.Value.(promql.Scalar); ok {
+			err = cmd.compareResult(promql.Scalar{V: vec[0].F})
 		} else {
 			err = cmd.compareResult(vec)
 		}
@@ -864,7 +888,7 @@ func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
 	return nil
 }
 
-func assertMatrixSorted(m Matrix) error {
+func assertMatrixSorted(m promql.Matrix) error {
 	if len(m) <= 1 {
 		return nil
 	}
@@ -894,29 +918,6 @@ func (t *test) clear() {
 	t.context, t.cancelCtx = context.WithCancel(context.Background())
 }
 
-// almostEqual returns true if a and b differ by less than their sum
-// multiplied by epsilon.
-func almostEqual(a, b, epsilon float64) bool {
-	// NaN has no equality but for testing we still want to know whether both values
-	// are NaN.
-	if math.IsNaN(a) && math.IsNaN(b) {
-		return true
-	}
-
-	// Cf. http://floating-point-gui.de/errors/comparison/
-	if a == b {
-		return true
-	}
-
-	absSum := math.Abs(a) + math.Abs(b)
-	diff := math.Abs(a - b)
-
-	if a == 0 || b == 0 || absSum < minNormal {
-		return diff < epsilon*minNormal
-	}
-	return diff/math.Min(absSum, math.MaxFloat64) < epsilon
-}
-
 func parseNumber(s string) (float64, error) {
 	n, err := strconv.ParseInt(s, 0, 64)
 	f := float64(n)
@@ -937,7 +938,7 @@ type LazyLoader struct {
 	storage          storage.Storage
 	SubqueryInterval time.Duration
 
-	queryEngine *Engine
+	queryEngine *promql.Engine
 	context     context.Context
 	cancelCtx   context.CancelFunc
 
@@ -1004,7 +1005,7 @@ func (ll *LazyLoader) clear() error {
 		return err
 	}
 
-	opts := EngineOpts{
+	opts := promql.EngineOpts{
 		Logger:     nil,
 		Reg:        nil,
 		MaxSamples: 10000,
@@ -1014,7 +1015,7 @@ func (ll *LazyLoader) clear() error {
 		EnableNegativeOffset: ll.opts.EnableNegativeOffset,
 	}
 
-	ll.queryEngine = NewEngine(opts)
+	ll.queryEngine = promql.NewEngine(opts)
 	ll.context, ll.cancelCtx = context.WithCancel(context.Background())
 	return nil
 }
@@ -1048,7 +1049,7 @@ func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) {
 }
 
 // QueryEngine returns the LazyLoader's query engine.
-func (ll *LazyLoader) QueryEngine() *Engine {
+func (ll *LazyLoader) QueryEngine() *promql.Engine {
 	return ll.queryEngine
 }
 
@@ -1074,3 +1075,17 @@ func (ll *LazyLoader) Close() error {
 	ll.cancelCtx()
 	return ll.storage.Close()
 }
+
+func makeInt64Pointer(val int64) *int64 {
+	valp := new(int64)
+	*valp = val
+	return valp
+}
+
+func timeMilliseconds(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+func durationMilliseconds(d time.Duration) int64 {
+	return int64(d / (time.Millisecond / time.Nanosecond))
+}
diff --git a/promql/test_test.go b/promql/promqltest/test_test.go
similarity index 88%
rename from promql/test_test.go
rename to promql/promqltest/test_test.go
index a5b24ac6986..f6fe3870716 100644
--- a/promql/test_test.go
+++ b/promql/promqltest/test_test.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package promql
+package promqltest
 
 import (
 	"math"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )
 
 func TestLazyLoader_WithSamplesTill(t *testing.T) {
 	type testCase struct {
 		ts time.Time
-		series []Series // Each series is checked separately. Need not mention all series here.
-		checkOnlyError bool // If this is true, series is not checked.
+		series         []promql.Series // Each series is checked separately. Need not mention all series here.
+		checkOnlyError bool            // If this is true, series is not checked.
 	}
 
 	cases := []struct {
@@ -44,33 +45,33 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 			testCases: []testCase{
 				{
 					ts: time.Unix(40, 0),
-					series: []Series{
+					series: []promql.Series{
 						{
 							Metric: labels.FromStrings("__name__", "metric1"),
-							Floats: []FPoint{
-								{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5},
+							Floats: []promql.FPoint{
+								{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5},
 							},
 						},
 					},
 				},
 				{
 					ts: time.Unix(10, 0),
-					series: []Series{
+					series: []promql.Series{
 						{
 							Metric: labels.FromStrings("__name__", "metric1"),
-							Floats: []FPoint{
-								{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5},
+							Floats: []promql.FPoint{
+								{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5},
 							},
 						},
 					},
 				},
 				{
 					ts: time.Unix(60, 0),
-					series: []Series{
+					series: []promql.Series{
 						{
 							Metric: labels.FromStrings("__name__", "metric1"),
-							Floats: []FPoint{
-								{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7},
+							Floats: []promql.FPoint{
+								{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5}, {T: 50000, F: 6}, {T: 60000, F: 7},
 							},
 						},
 					},
@@ -86,17 +87,17 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 			testCases: []testCase{
 				{ // Adds all samples of metric1.
 					ts: time.Unix(70, 0),
-					series: []Series{
+					series: []promql.Series{
 						{
 							Metric: labels.FromStrings("__name__", "metric1"),
-							Floats: []FPoint{
-								{0, 1}, {10000, 1}, {20000, 1}, {30000, 1}, {40000, 1}, {50000, 1},
+							Floats: []promql.FPoint{
+								{T: 0, F: 1}, {T: 10000, F: 1}, {T: 20000, F: 1}, {T: 30000, F: 1}, {T: 40000, F: 1}, {T: 50000, F: 1},
 							},
 						},
 						{
 							Metric: labels.FromStrings("__name__", "metric2"),
-							Floats: []FPoint{
-								{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7}, {70000, 8},
+							Floats: []promql.FPoint{
+								{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5}, {T: 50000, F: 6}, {T: 60000, F: 7}, {T: 70000, F: 8},
 							},
 						},
 					},
@@ -140,13 +141,13 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
 			require.False(t, ss.Next(), "Expecting only 1 series")
 
 			// Convert `storage.Series` to `promql.Series`.
-			got := Series{
+			got := promql.Series{
 				Metric: storageSeries.Labels(),
 			}
 			it := storageSeries.Iterator(nil)
 			for it.Next() == chunkenc.ValFloat {
 				t, v := it.At()
-				got.Floats = append(got.Floats, FPoint{T: t, F: v})
+				got.Floats = append(got.Floats, promql.FPoint{T: t, F: v})
 			}
 			require.NoError(t, it.Err())
@@ -450,7 +451,7 @@ eval range from 0 to 5m step 5m testmetric
 	for name, testCase := range testCases {
 		t.Run(name, func(t *testing.T) {
-			err := runTest(t, testCase.input, newTestEngine())
+			err := runTest(t, testCase.input, NewTestEngine(false, 0, DefaultMaxSamplesPerQuery))
 
 			if testCase.expectedError == "" {
 				require.NoError(t, err)
@@ -463,42 +464,42 @@ eval range from 0 to 5m step 5m testmetric
 
 func TestAssertMatrixSorted(t *testing.T) {
 	testCases := map[string]struct {
-		matrix Matrix
+		matrix        promql.Matrix
 		expectedError string
 	}{
 		"empty matrix": {
-			matrix: Matrix{},
+			matrix: promql.Matrix{},
 		},
 		"matrix with one series": {
-			matrix: Matrix{
-				Series{Metric: labels.FromStrings("the_label", "value_1")},
+			matrix: promql.Matrix{
+				promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
 			},
 		},
 		"matrix with two series, series in sorted order": {
-			matrix: Matrix{
-				Series{Metric: labels.FromStrings("the_label", "value_1")},
-				Series{Metric: labels.FromStrings("the_label", "value_2")},
+			matrix: promql.Matrix{
+				promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
+				promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
			},
 		},
 		"matrix with two series, series in reverse order": {
-			matrix: Matrix{
-				Series{Metric: labels.FromStrings("the_label", "value_2")},
-				Series{Metric: labels.FromStrings("the_label", "value_1")},
+			matrix: promql.Matrix{
+				promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
+				promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
 			},
 			expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 1 with labels {the_label="value_1"} sorts before series at index 0 with labels {the_label="value_2"}`,
 		},
 		"matrix with three series, series in sorted order": {
-			matrix: Matrix{
-				Series{Metric: labels.FromStrings("the_label", "value_1")},
-				Series{Metric: labels.FromStrings("the_label", "value_2")},
-				Series{Metric: labels.FromStrings("the_label", "value_3")},
+			matrix: promql.Matrix{
+				promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
+				promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
+				promql.Series{Metric: labels.FromStrings("the_label", "value_3")},
 			},
 		},
 		"matrix with three series, series not in sorted order": {
-			matrix: Matrix{
-				Series{Metric: labels.FromStrings("the_label", "value_1")},
-				Series{Metric: labels.FromStrings("the_label", "value_3")},
-				Series{Metric: labels.FromStrings("the_label", "value_2")},
+			matrix: promql.Matrix{
+				promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
+				promql.Series{Metric: labels.FromStrings("the_label", "value_3")},
+				promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
 			},
 			expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 2 with labels {the_label="value_2"} sorts before series at index 1 with labels {the_label="value_3"}`,
 		},
diff --git a/promql/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test
similarity index 100%
rename from promql/testdata/aggregators.test
rename to promql/promqltest/testdata/aggregators.test
diff --git a/promql/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test
similarity index 100%
rename from promql/testdata/at_modifier.test
rename to promql/promqltest/testdata/at_modifier.test
diff --git a/promql/testdata/collision.test b/promql/promqltest/testdata/collision.test
similarity index 100%
rename from promql/testdata/collision.test
rename to promql/promqltest/testdata/collision.test
diff --git a/promql/testdata/functions.test b/promql/promqltest/testdata/functions.test
similarity index 99%
rename from promql/testdata/functions.test
rename to promql/promqltest/testdata/functions.test
index e01c75a7f61..2c198374acb 100644
--- a/promql/testdata/functions.test
+++ b/promql/promqltest/testdata/functions.test
@@ -764,6 +764,14 @@ eval instant at 1m avg_over_time(metric10[1m])
 eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
   {} 0
 
+# Test if very big intermediate values cause loss of detail.
+clear
+load 10s
+  metric 1 1e100 1 -1e100
+
+eval instant at 1m sum_over_time(metric[1m])
+  {} 2
+
 # Tests for stddev_over_time and stdvar_over_time.
 clear
 load 10s
diff --git a/promql/testdata/histograms.test b/promql/promqltest/testdata/histograms.test
similarity index 100%
rename from promql/testdata/histograms.test
rename to promql/promqltest/testdata/histograms.test
diff --git a/promql/testdata/literals.test b/promql/promqltest/testdata/literals.test
similarity index 100%
rename from promql/testdata/literals.test
rename to promql/promqltest/testdata/literals.test
diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test
new file mode 100644
index 00000000000..37818e4f888
--- /dev/null
+++ b/promql/promqltest/testdata/native_histograms.test
@@ -0,0 +1,716 @@
+# Minimal valid case: an empty histogram.
+load 5m
+  empty_histogram {{}}
+
+eval instant at 5m empty_histogram
+  {__name__="empty_histogram"} {{}}
+
+eval instant at 5m histogram_count(empty_histogram)
+  {} 0
+
+eval instant at 5m histogram_sum(empty_histogram)
+  {} 0
+
+eval instant at 5m histogram_avg(empty_histogram)
+  {} NaN
+
+eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
+  {} NaN
+
+eval instant at 5m histogram_fraction(0, 8, empty_histogram)
+  {} NaN
+
+
+
+# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
+load 5m
+  single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}
+
+# histogram_count extracts the count property from the histogram.
+eval instant at 5m histogram_count(single_histogram)
+  {} 4
+
+# histogram_sum extracts the sum property from the histogram.
+eval instant at 5m histogram_sum(single_histogram)
+  {} 5
+
+# histogram_avg calculates the average from sum and count properties.
+eval instant at 5m histogram_avg(single_histogram)
+  {} 1.25
+
+# We expect half of the values to fall in the range 1 < x <= 2.
+eval instant at 5m histogram_fraction(1, 2, single_histogram)
+  {} 0.5
+
+# We expect all values to fall in the range 0 < x <= 8.
+eval instant at 5m histogram_fraction(0, 8, single_histogram)
+  {} 1
+
+# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
+eval instant at 5m histogram_quantile(0.5, single_histogram)
+  {} 1.5
+
+
+
+# Repeat the same histogram 10 times.
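+# (The x10 suffix below repeats the same sample at each subsequent 5m step, so
+# the queries at 5m and at 50m further down must return identical results.)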
+load 5m
+  multi_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x10
+
+eval instant at 5m histogram_count(multi_histogram)
+  {} 4
+
+eval instant at 5m histogram_sum(multi_histogram)
+  {} 5
+
+eval instant at 5m histogram_avg(multi_histogram)
+  {} 1.25
+
+eval instant at 5m histogram_fraction(1, 2, multi_histogram)
+  {} 0.5
+
+eval instant at 5m histogram_quantile(0.5, multi_histogram)
+  {} 1.5
+
+
+# Each entry should look the same as the first.
+eval instant at 50m histogram_count(multi_histogram)
+  {} 4
+
+eval instant at 50m histogram_sum(multi_histogram)
+  {} 5
+
+eval instant at 50m histogram_avg(multi_histogram)
+  {} 1.25
+
+eval instant at 50m histogram_fraction(1, 2, multi_histogram)
+  {} 0.5
+
+eval instant at 50m histogram_quantile(0.5, multi_histogram)
+  {} 1.5
+
+
+
+# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket
+# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket
+# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0.
+load 5m
+  incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10
+
+eval instant at 5m histogram_count(incr_histogram)
+  {} 5
+
+eval instant at 5m histogram_sum(incr_histogram)
+  {} 6
+
+eval instant at 5m histogram_avg(incr_histogram)
+  {} 1.2
+
+# We expect 3/5ths of the values to fall in the range 1 < x <= 2.
+eval instant at 5m histogram_fraction(1, 2, incr_histogram)
+  {} 0.6
+
+eval instant at 5m histogram_quantile(0.5, incr_histogram)
+  {} 1.5
+
+
+eval instant at 50m incr_histogram
+  {__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}
+
+eval instant at 50m histogram_count(incr_histogram)
+  {} 14
+
+eval instant at 50m histogram_sum(incr_histogram)
+  {} 24
+
+eval instant at 50m histogram_avg(incr_histogram)
+  {} 1.7142857142857142
+
+# We expect 12/14ths of the values to fall in the range 1 < x <= 2.
+eval instant at 50m histogram_fraction(1, 2, incr_histogram)
+  {} 0.8571428571428571
+
+eval instant at 50m histogram_quantile(0.5, incr_histogram)
+  {} 1.5
+
+# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
+eval instant at 50m rate(incr_histogram[5m])
+  {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
+
+# Calculate the 50th percentile of observations over the last 10m.
+eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
+  {} 1.5
+
+
+
+# Schema represents the histogram resolution; different schemas have compatible bucket boundaries, e.g.:
+#   0: 1 2 4 8 16 32 64 (higher resolution)
+#  -1: 1 4 16 64 (lower resolution)
+#
+# Histograms can be merged as long as the histogram to the right has the same resolution or a higher one.
+load 5m
+  low_res_histogram {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}+{{schema:0 sum:4 count:4 buckets:[2 2] offset:1}}x1
+
+eval instant at 5m low_res_histogram
+  {__name__="low_res_histogram"} {{schema:-1 count:5 sum:8 offset:1 buckets:[5]}}
+
+eval instant at 5m histogram_count(low_res_histogram)
+  {} 5
+
+eval instant at 5m histogram_sum(low_res_histogram)
+  {} 8
+
+eval instant at 5m histogram_avg(low_res_histogram)
+  {} 1.6
+
+# We expect all values to fall into the lower-resolution bucket with the range 1 < x <= 4.
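+# (Worked example: the schema 0 buckets (1,2] and (2,4] of the added histogram
+# both map onto the single schema -1 bucket (1,4], so the merged histogram
+# carries all 5 observations in that one bucket, as asserted above.)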
+eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
+  {} 1
+
+
+
+# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range
+# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
+load 5m
+  single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}
+
+eval instant at 5m histogram_count(single_zero_histogram)
+  {} 1
+
+eval instant at 5m histogram_sum(single_zero_histogram)
+  {} 0.25
+
+eval instant at 5m histogram_avg(single_zero_histogram)
+  {} 0.25
+
+# When only the zero bucket is populated, or there are negative buckets, the observations are assumed to be equally
+# distributed around zero, i.e. there are an equal number of positive and negative observations. Therefore the
+# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
+eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
+  {} 1
+
+# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
+eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
+  {} 0
+
+
+
+# Let's turn single_histogram upside-down.
+load 5m
+  negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}
+
+eval instant at 5m histogram_count(negative_histogram)
+  {} 4
+
+eval instant at 5m histogram_sum(negative_histogram)
+  {} -5
+
+eval instant at 5m histogram_avg(negative_histogram)
+  {} -1.25
+
+# We expect half of the values to fall in the range -2 < x <= -1.
+eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
+  {} 0.5
+
+eval instant at 5m histogram_quantile(0.5, negative_histogram)
+  {} -1.5
+
+
+
+# Two histogram samples.
+load 5m
+  two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}
+
+# We expect to see the newest sample.
+eval instant at 10m histogram_count(two_samples_histogram)
+  {} 4
+
+eval instant at 10m histogram_sum(two_samples_histogram)
+  {} -4
+
+eval instant at 10m histogram_avg(two_samples_histogram)
+  {} -1
+
+eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
+  {} 0.5
+
+eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
+  {} -1.5
+
+
+
+# Add two histograms with negated data.
+load 5m
+  balanced_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}x1
+
+eval instant at 5m histogram_count(balanced_histogram)
+  {} 8
+
+eval instant at 5m histogram_sum(balanced_histogram)
+  {} 0
+
+eval instant at 5m histogram_avg(balanced_histogram)
+  {} 0
+
+eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
+  {} 0.5
+
+# If the quantile happens to be located in a span of empty buckets, the actually returned value is the lower bound of
+# the first populated bucket after the span of empty buckets.
+eval instant at 5m histogram_quantile(0.5, balanced_histogram)
+  {} 0.5
+
+# Add histogram to test sum(last_over_time) regression.
+load 5m
+  incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10
+  incr_sum_histogram{number="2"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:2 count:1 buckets:[1]}}x10
+
+eval instant at 50m histogram_sum(sum(incr_sum_histogram))
+  {} 30
+
+eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
+  {} 30
+
+# Apply rate function to histogram.
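+# (Worked example: each 15s step adds count 9 and sum 18.4, so the expected
+# per-second rate is count 9/15 = 0.6, sum 18.4/15 = 1.2266..., and 1/15 =
+# 0.0666... per populated bucket, matching the values below up to floating-point
+# error.)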
+load 15s
+  histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
+
+eval instant at 5m rate(histogram_rate[45s])
+  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}
+
+eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
+  {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
+
+# Apply count and sum functions to histogram.
+load 10m
+  histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
+
+eval instant at 10m histogram_count(histogram_count_sum_2)
+  {} 24
+
+eval instant at 10m histogram_sum(histogram_count_sum_2)
+  {} 100
+
+# Apply stddev and stdvar functions to histogram with {1, 2, 3, 4} (low res).
+load 10m
+  histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
+  {} 1.0787993180043811
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
+  {} 1.163807968526718
+
+# Apply stddev and stdvar functions to histogram with {1, 1, 1, 1} (high res).
+load 10m
+  histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
+  {} 0.0048960313898237465
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
+  {} 2.3971123370139447e-05
+
+# Apply stddev and stdvar functions to histogram with {-50, -8, 0, 3, 8, 9}.
+load 10m
+  histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
+  {} 42.947236400258
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
+  {} 1844.4651144196398
+
+# Apply stddev and stdvar functions to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
+load 10m
+  histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
+  {} 27556.344499842
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
+  {} 759352122.1939945
+
+# Apply stddev and stdvar functions to histogram with {-10x10}.
+load 10m
+  histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1
+
+eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
+  {} 1.3137084989848
+
+eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
+  {} 1.725830020304794
+
+# Apply stddev and stdvar functions to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
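+# (The NaN observation is reflected only in the stored sum, which poisons the
+# mean, so both estimators below are expected to return NaN.)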
+load 10m + histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6) + {} NaN + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6) + {} NaN + +# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}. +load 10m + histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 + +eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7) + {} NaN + +eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7) + {} NaN + +# Apply quantile function to histogram with all positive buckets with zero bucket. +load 10m + histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_quantile(1.001, histogram_quantile_1) + {} Inf + +eval instant at 10m histogram_quantile(1, histogram_quantile_1) + {} 16 + +eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) + {} 15.759999999999998 + +eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) + {} 13.600000000000001 + +eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) + {} 4.799999999999997 + +eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) + {} 1.6666666666666665 + +eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) + {} 0.0006000000000000001 + +eval instant at 10m histogram_quantile(0, histogram_quantile_1) + {} 0 + +eval instant at 10m histogram_quantile(-1, histogram_quantile_1) + {} -Inf + +# Apply quantile function to histogram with all negative buckets with zero bucket. +load 10m + histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_quantile(1.001, histogram_quantile_2) + {} Inf + +eval instant at 10m histogram_quantile(1, histogram_quantile_2) + {} 0 + +eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) + {} -6.000000000000048e-05 + +eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) + {} -0.0005999999999999996 + +eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) + {} -1.6666666666666667 + +eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) + {} -13.6 + +eval instant at 10m histogram_quantile(0, histogram_quantile_2) + {} -16 + +eval instant at 10m histogram_quantile(-1, histogram_quantile_2) + {} -Inf + +# Apply quantile function to histogram with both positive and negative buckets with zero bucket. 
+load 10m + histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_quantile(1.001, histogram_quantile_3) + {} Inf + +eval instant at 10m histogram_quantile(1, histogram_quantile_3) + {} 16 + +eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) + {} 15.519999999999996 + +eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) + {} 11.200000000000003 + +eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) + {} 1.2666666666666657 + +eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) + {} 0.0006000000000000005 + +eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) + {} 0 + +eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) + {} -0.0005999999999999996 + +eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) + {} -1.266666666666667 + +eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) + {} -11.2 + +eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) + {} -15.52 + +eval instant at 10m histogram_quantile(0, histogram_quantile_3) + {} -16 + +eval instant at 10m histogram_quantile(-1, histogram_quantile_3) + {} -Inf + +# Apply fraction function to empty histogram. +load 10m + histogram_fraction_1 {{}}x1 + +eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1) + {} NaN + +# Apply fraction function to histogram with positive and zero buckets. +load 10m + histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_2) + {} 1 + +eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) + {} 0.8333333333333334 + +eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) + {} 0.25 + +eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) + {} 0.125 + +eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) + {} 0.3333333333333333 + +eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) + {} 0.2916666666666667 + +eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(0, 0, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(42, 42, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_2) + {} 0 + +eval instant at 10m histogram_fraction(3.1415, NaN, 
histogram_fraction_2) + {} NaN + +eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_2) + {} NaN + +eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_2) + {} NaN + +eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_2) + {} 1 + +# Apply fraction function to histogram with negative and zero buckets. +load 10m + histogram_fraction_3 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_3) + {} 1 + +eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_3) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_3) + {} 0.8333333333333334 + +eval instant at 10m histogram_fraction(1, 2, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1, 8, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1, 6, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3) + {} 0.25 + +eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) + {} 0.125 + +eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) + {} 0.3333333333333333 + +eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) + {} 0.2916666666666667 + +eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(0, 0, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(42, 42, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_3) + {} 0 + +eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_3) + {} NaN + +eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_3) + {} NaN + +eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3) + {} NaN + +eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3) + {} 1 + +# Apply fraction function to histogram with positive, negative, and zero buckets.
+load 10m + histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + +eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_4) + {} 0.5 + +eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_4) + {} 0.5 + +eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) + {} 0.4166666666666667 + +eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_4) + {} 0.4166666666666667 + +eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4) + {} 0.125 + +eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) + {} 0.0625 + +eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) + {} 0.14583333333333334 + +eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) + {} 0.125 + +eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) + {} 0.0625 + +eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) + {} 0.16666666666666666 + +eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) + {} 0.14583333333333334 + +eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) + {} 0.08333333333333333 + +eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(0, 0, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(42, 42, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_4) + {} 0 + +eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_4) + {} NaN + +eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_4) + {} NaN + +eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4) + {} NaN + +eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4) + {} 1 diff --git a/promql/testdata/operators.test b/promql/promqltest/testdata/operators.test similarity index 100% rename from promql/testdata/operators.test rename to promql/promqltest/testdata/operators.test diff --git a/promql/testdata/selectors.test b/promql/promqltest/testdata/selectors.test similarity index 100% rename from promql/testdata/selectors.test rename to promql/promqltest/testdata/selectors.test diff --git a/promql/testdata/staleness.test b/promql/promqltest/testdata/staleness.test similarity index 100% rename from promql/testdata/staleness.test rename to promql/promqltest/testdata/staleness.test diff --git a/promql/testdata/subquery.test b/promql/promqltest/testdata/subquery.test similarity index 100% rename from promql/testdata/subquery.test rename to promql/promqltest/testdata/subquery.test diff --git a/promql/testdata/trig_functions.test b/promql/promqltest/testdata/trig_functions.test similarity index 100% rename from promql/testdata/trig_functions.test rename to promql/promqltest/testdata/trig_functions.test diff --git a/promql/quantile.go 
b/promql/quantile.go index 6a225afb11f..d4bc9ee6e47 100644 --- a/promql/quantile.go +++ b/promql/quantile.go @@ -20,6 +20,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/almost" ) // smallDeltaTolerance is the threshold for relative deltas between classic @@ -397,7 +398,7 @@ func ensureMonotonicAndIgnoreSmallDeltas(buckets buckets, tolerance float64) (bo // No correction needed if the counts are identical between buckets. continue } - if almostEqual(prev, curr, tolerance) { + if almost.Equal(prev, curr, tolerance) { // Silently correct numerically insignificant differences from floating // point precision errors, regardless of direction. // Do not update the 'prev' value as we are ignoring the difference. diff --git a/promql/query_logger.go b/promql/query_logger.go index fa4e1fb0792..7e06ebb97fe 100644 --- a/promql/query_logger.go +++ b/promql/query_logger.go @@ -16,6 +16,8 @@ package promql import ( "context" "encoding/json" + "errors" + "fmt" "io" "os" "path/filepath" @@ -36,6 +38,8 @@ type ActiveQueryTracker struct { maxConcurrent int } +var _ io.Closer = &ActiveQueryTracker{} + type Entry struct { Query string `json:"query"` Timestamp int64 `json:"timestamp_sec"` @@ -83,6 +87,23 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } } +type mmapedFile struct { + f io.Closer + m mmap.MMap +} + +func (f *mmapedFile) Close() error { + err := f.m.Unmap() + if err != nil { + err = fmt.Errorf("mmapedFile: unmapping: %w", err) + } + if fErr := f.f.Close(); fErr != nil { + return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err) + } + + return err +} + func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { @@ -96,17 +117,19 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io err = file.Truncate(int64(filesize)) if err != nil { + file.Close() level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { + file.Close() level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) return nil, nil, err } - return fileAsBytes, file, err + return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err } func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { @@ -202,9 +225,13 @@ func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int } } -func (tracker *ActiveQueryTracker) Close() { +// Close closes tracker. 
+func (tracker *ActiveQueryTracker) Close() error { if tracker == nil || tracker.closer == nil { - return + return nil + } + if err := tracker.closer.Close(); err != nil { + return fmt.Errorf("close ActiveQueryTracker.closer: %w", err) } - tracker.closer.Close() + return nil } diff --git a/promql/query_logger_test.go b/promql/query_logger_test.go index 4135753fd70..7bd93781ec8 100644 --- a/promql/query_logger_test.go +++ b/promql/query_logger_test.go @@ -16,6 +16,7 @@ package promql import ( "context" "os" + "path/filepath" "testing" "github.com/grafana/regexp" @@ -104,29 +105,26 @@ func TestIndexReuse(t *testing.T) { } func TestMMapFile(t *testing.T) { - file, err := os.CreateTemp("", "mmapedFile") - require.NoError(t, err) - - filename := file.Name() - defer os.Remove(filename) - - fileAsBytes, closer, err := getMMapedFile(filename, 2, nil) - if err != nil { - t.Cleanup(func() { closer.Close() }) - } + dir := t.TempDir() + fpath := filepath.Join(dir, "mmapedFile") + const data = "ab" + fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil) require.NoError(t, err) - copy(fileAsBytes, "ab") + copy(fileAsBytes, data) + require.NoError(t, closer.Close()) - f, err := os.Open(filename) + f, err := os.Open(fpath) require.NoError(t, err) + t.Cleanup(func() { + _ = f.Close() + }) bytes := make([]byte, 4) n, err := f.Read(bytes) - require.Equal(t, 2, n) require.NoError(t, err, "Unexpected error while reading file.") - - require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed") + require.Equal(t, 2, n) + require.Equal(t, []byte(data), bytes[:2], "Mmap failed") } func TestParseBrokenJSON(t *testing.T) { diff --git a/promql/testdata/native_histograms.test b/promql/testdata/native_histograms.test deleted file mode 100644 index 1da68a385f8..00000000000 --- a/promql/testdata/native_histograms.test +++ /dev/null @@ -1,271 +0,0 @@ -# Minimal valid case: an empty histogram. -load 5m - empty_histogram {{}} - -eval instant at 5m empty_histogram - {__name__="empty_histogram"} {{}} - -eval instant at 5m histogram_count(empty_histogram) - {} 0 - -eval instant at 5m histogram_sum(empty_histogram) - {} 0 - -eval instant at 5m histogram_avg(empty_histogram) - {} NaN - -eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram) - {} NaN - -eval instant at 5m histogram_fraction(0, 8, empty_histogram) - {} NaN - - - -# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4). -load 5m - single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}} - -# histogram_count extracts the count property from the histogram. -eval instant at 5m histogram_count(single_histogram) - {} 4 - -# histogram_sum extracts the sum property from the histogram. -eval instant at 5m histogram_sum(single_histogram) - {} 5 - -# histogram_avg calculates the average from sum and count properties. -eval instant at 5m histogram_avg(single_histogram) - {} 1.25 - -# We expect half of the values to fall in the range 1 < x <= 2. -eval instant at 5m histogram_fraction(1, 2, single_histogram) - {} 0.5 - -# We expect all values to fall in the range 0 < x <= 8. -eval instant at 5m histogram_fraction(0, 8, single_histogram) - {} 1 - -# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2. -eval instant at 5m histogram_quantile(0.5, single_histogram) - {} 1.5 - - - -# Repeat the same histogram 10 times. 
-load 5m - multi_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x10 - -eval instant at 5m histogram_count(multi_histogram) - {} 4 - -eval instant at 5m histogram_sum(multi_histogram) - {} 5 - -eval instant at 5m histogram_avg(multi_histogram) - {} 1.25 - -eval instant at 5m histogram_fraction(1, 2, multi_histogram) - {} 0.5 - -eval instant at 5m histogram_quantile(0.5, multi_histogram) - {} 1.5 - - -# Each entry should look the same as the first. -eval instant at 50m histogram_count(multi_histogram) - {} 4 - -eval instant at 50m histogram_sum(multi_histogram) - {} 5 - -eval instant at 50m histogram_avg(multi_histogram) - {} 1.25 - -eval instant at 50m histogram_fraction(1, 2, multi_histogram) - {} 0.5 - -eval instant at 50m histogram_quantile(0.5, multi_histogram) - {} 1.5 - - - -# Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket -# with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket -# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0. -load 5m - incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10 - -eval instant at 5m histogram_count(incr_histogram) - {} 5 - -eval instant at 5m histogram_sum(incr_histogram) - {} 6 - -eval instant at 5m histogram_avg(incr_histogram) - {} 1.2 - -# We expect 3/5ths of the values to fall in the range 1 < x <= 2. -eval instant at 5m histogram_fraction(1, 2, incr_histogram) - {} 0.6 - -eval instant at 5m histogram_quantile(0.5, incr_histogram) - {} 1.5 - - -eval instant at 50m incr_histogram - {__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}} - -eval instant at 50m histogram_count(incr_histogram) - {} 14 - -eval instant at 50m histogram_sum(incr_histogram) - {} 24 - -eval instant at 50m histogram_avg(incr_histogram) - {} 1.7142857142857142 - -# We expect 12/14ths of the values to fall in the range 1 < x <= 2. -eval instant at 50m histogram_fraction(1, 2, incr_histogram) - {} 0.8571428571428571 - -eval instant at 50m histogram_quantile(0.5, incr_histogram) - {} 1.5 - -# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. -eval instant at 50m rate(incr_histogram[5m]) - {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} - -# Calculate the 50th percentile of observations over the last 10m. -eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) - {} 1.5 - - - -# Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.: -# 0: 1 2 4 8 16 32 64 (higher resolution) -# -1: 1 4 16 64 (lower resolution) -# -# Histograms can be merged as long as the histogram to the right is same resolution or higher. -load 5m - low_res_histogram {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}+{{schema:0 sum:4 count:4 buckets:[2 2] offset:1}}x1 - -eval instant at 5m low_res_histogram - {__name__="low_res_histogram"} {{schema:-1 count:5 sum:8 offset:1 buckets:[5]}} - -eval instant at 5m histogram_count(low_res_histogram) - {} 5 - -eval instant at 5m histogram_sum(low_res_histogram) - {} 8 - -eval instant at 5m histogram_avg(low_res_histogram) - {} 1.6 - -# We expect all values to fall into the lower-resolution bucket with the range 1 < x <= 4. 
-eval instant at 5m histogram_fraction(1, 4, low_res_histogram) - {} 1 - - - -# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range -# 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket. -load 5m - single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}} - -eval instant at 5m histogram_count(single_zero_histogram) - {} 1 - -eval instant at 5m histogram_sum(single_zero_histogram) - {} 0.25 - -eval instant at 5m histogram_avg(single_zero_histogram) - {} 0.25 - -# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally -# distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the -# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5. -eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram) - {} 1 - -# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5. -eval instant at 5m histogram_quantile(0.5, single_zero_histogram) - {} 0 - - - -# Let's turn single_histogram upside-down. -load 5m - negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}} - -eval instant at 5m histogram_count(negative_histogram) - {} 4 - -eval instant at 5m histogram_sum(negative_histogram) - {} -5 - -eval instant at 5m histogram_avg(negative_histogram) - {} -1.25 - -# We expect half of the values to fall in the range -2 < x <= -1. -eval instant at 5m histogram_fraction(-2, -1, negative_histogram) - {} 0.5 - -eval instant at 5m histogram_quantile(0.5, negative_histogram) - {} -1.5 - - - -# Two histogram samples. -load 5m - two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}} - -# We expect to see the newest sample. -eval instant at 10m histogram_count(two_samples_histogram) - {} 4 - -eval instant at 10m histogram_sum(two_samples_histogram) - {} -4 - -eval instant at 10m histogram_avg(two_samples_histogram) - {} -1 - -eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram) - {} 0.5 - -eval instant at 10m histogram_quantile(0.5, two_samples_histogram) - {} -1.5 - - - -# Add two histograms with negated data. -load 5m - balanced_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}x1 - -eval instant at 5m histogram_count(balanced_histogram) - {} 8 - -eval instant at 5m histogram_sum(balanced_histogram) - {} 0 - -eval instant at 5m histogram_avg(balanced_histogram) - {} 0 - -eval instant at 5m histogram_fraction(0, 4, balanced_histogram) - {} 0.5 - -# If the quantile happens to be located in a span of empty buckets, the actually returned value is the lower bound of -# the first populated bucket after the span of empty buckets. 
-eval instant at 5m histogram_quantile(0.5, balanced_histogram) - {} 0.5 - -# Add histogram to test sum(last_over_time) regression -load 5m - incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10 - incr_sum_histogram{number="2"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:2 count:1 buckets:[1]}}x10 - -eval instant at 50m histogram_sum(sum(incr_sum_histogram)) - {} 30 - -eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) - {} 30 diff --git a/promql/value_test.go b/promql/value_test.go index c93ba8213d5..0017b41e2cc 100644 --- a/promql/value_test.go +++ b/promql/value_test.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package promql +package promql_test import ( "testing" @@ -19,39 +19,40 @@ import ( "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" ) func TestVector_ContainsSameLabelset(t *testing.T) { for name, tc := range map[string]struct { - vector Vector + vector promql.Vector expected bool }{ "empty vector": { - vector: Vector{}, + vector: promql.Vector{}, expected: false, }, "vector with one series": { - vector: Vector{ + vector: promql.Vector{ {Metric: labels.FromStrings("lbl", "a")}, }, expected: false, }, "vector with two different series": { - vector: Vector{ + vector: promql.Vector{ {Metric: labels.FromStrings("lbl", "a")}, {Metric: labels.FromStrings("lbl", "b")}, }, expected: false, }, "vector with two equal series": { - vector: Vector{ + vector: promql.Vector{ {Metric: labels.FromStrings("lbl", "a")}, {Metric: labels.FromStrings("lbl", "a")}, }, expected: true, }, "vector with three series, two equal": { - vector: Vector{ + vector: promql.Vector{ {Metric: labels.FromStrings("lbl", "a")}, {Metric: labels.FromStrings("lbl", "b")}, {Metric: labels.FromStrings("lbl", "a")}, @@ -67,35 +68,35 @@ func TestVector_ContainsSameLabelset(t *testing.T) { func TestMatrix_ContainsSameLabelset(t *testing.T) { for name, tc := range map[string]struct { - matrix Matrix + matrix promql.Matrix expected bool }{ "empty matrix": { - matrix: Matrix{}, + matrix: promql.Matrix{}, expected: false, }, "matrix with one series": { - matrix: Matrix{ + matrix: promql.Matrix{ {Metric: labels.FromStrings("lbl", "a")}, }, expected: false, }, "matrix with two different series": { - matrix: Matrix{ + matrix: promql.Matrix{ {Metric: labels.FromStrings("lbl", "a")}, {Metric: labels.FromStrings("lbl", "b")}, }, expected: false, }, "matrix with two equal series": { - matrix: Matrix{ + matrix: promql.Matrix{ {Metric: labels.FromStrings("lbl", "a")}, {Metric: labels.FromStrings("lbl", "a")}, }, expected: true, }, "matrix with three series, two equal": { - matrix: Matrix{ + matrix: promql.Matrix{ {Metric: labels.FromStrings("lbl", "a")}, {Metric: labels.FromStrings("lbl", "b")}, {Metric: labels.FromStrings("lbl", "a")}, diff --git a/rules/alerting.go b/rules/alerting.go index 50c67fa2d92..2dc0917dceb 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -246,13 +246,16 @@ func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample { return s } -// forStateSample returns the sample for ALERTS_FOR_STATE. +// forStateSample returns a promql.Sample with the rule labels, `ALERTS_FOR_STATE` as the metric name and the rule name as the `alertname` label. +// Optionally, if an alert is provided it'll copy the labels of the alert into the sample labels. 
func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample { lb := labels.NewBuilder(r.labels) - alert.Labels.Range(func(l labels.Label) { - lb.Set(l.Name, l.Value) - }) + if alert != nil { + alert.Labels.Range(func(l labels.Label) { + lb.Set(l.Name, l.Value) + }) + } lb.Set(labels.MetricName, alertForStateMetricName) lb.Set(labels.AlertName, r.name) @@ -265,9 +268,11 @@ func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) pro return s } -// QueryforStateSeries returns the series for ALERTS_FOR_STATE. -func (r *AlertingRule) QueryforStateSeries(ctx context.Context, alert *Alert, q storage.Querier) (storage.Series, error) { - smpl := r.forStateSample(alert, time.Now(), 0) +// QueryForStateSeries returns the series for ALERTS_FOR_STATE of the alert rule. +func (r *AlertingRule) QueryForStateSeries(ctx context.Context, q storage.Querier) (storage.SeriesSet, error) { + // We use a sample to simplify building the matchers. + // Don't provide an alert, as we want matchers that match all series for the alert rule. + smpl := r.forStateSample(nil, time.Now(), 0) var matchers []*labels.Matcher smpl.Metric.Range(func(l labels.Label) { mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value) @@ -276,20 +281,9 @@ func (r *AlertingRule) QueryforStateSeries(ctx context.Context, alert *Alert, q } matchers = append(matchers, mt) }) - sset := q.Select(ctx, false, nil, matchers...) - - var s storage.Series - for sset.Next() { - // Query assures that smpl.Metric is included in sset.At().Labels(), - // hence just checking the length would act like equality. - // (This is faster than calling labels.Compare again as we already have some info). - if sset.At().Labels().Len() == len(matchers) { - s = sset.At() - break - } - } - return s, sset.Err() + sset := q.Select(ctx, false, nil, matchers...) + return sset, sset.Err() } // SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation. @@ -344,10 +338,9 @@ const resolvedRetention = 15 * time.Minute // Eval evaluates the rule expression and then creates pending alerts and fires // or removes previously pending alerts accordingly. -func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) { +func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) { ctx = NewOriginContext(ctx, NewRuleDetail(r)) - - res, err := query(ctx, r.vector.String(), ts) + res, err := query(ctx, r.vector.String(), ts.Add(-queryOffset)) if err != nil { return nil, err } @@ -457,8 +450,17 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, } } - // If the alert was previously firing, keep it around for a given - // retention time so it is reported as resolved to the AlertManager. + // If the alert is resolved (was firing but is now inactive), keep it for + // at least the retention period. This is important for a number of reasons: + // + // 1. It allows Prometheus to be more resilient to network issues that + // would otherwise prevent a resolved alert from being reported as resolved + // to Alertmanager. + // + // 2. It helps reduce the chance of resolved notifications being lost if + // Alertmanager crashes or restarts between receiving the resolved alert + // from Prometheus and sending the resolved notification.
This tends to + // occur for routes with large Group intervals. if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) { delete(r.active, fp) } @@ -481,8 +483,8 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, } if r.restored.Load() { - vec = append(vec, r.sample(a, ts)) - vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix()))) + vec = append(vec, r.sample(a, ts.Add(-queryOffset))) + vec = append(vec, r.forStateSample(a, ts.Add(-queryOffset), float64(a.ActiveAt.Unix()))) } } @@ -548,6 +550,13 @@ func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { } } +func (r *AlertingRule) ActiveAlertsCount() int { + r.activeMtx.Lock() + defer r.activeMtx.Unlock() + + return len(r.active) +} + func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) { alerts := []*Alert{} r.ForEachActiveAlert(func(alert *Alert) { diff --git a/rules/alerting_test.go b/rules/alerting_test.go index ddfe345efb9..5ebd049f669 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -30,6 +30,7 @@ import ( "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -122,7 +123,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) { ) evalTime := time.Now() - res, err := rule.Eval(context.TODO(), evalTime, q, nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, q, nil, 0) require.NoError(t, err) require.Len(t, res, 2) @@ -148,7 +149,7 @@ func TestAlertingRuleTemplateWithHistogram(t *testing.T) { } func TestAlertingRuleLabelsUpdate(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 stale `) @@ -229,7 +230,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) result[0].T = timestamp.FromTime(evalTime) - res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. @@ -246,13 +247,13 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { testutil.RequireEqual(t, result, filteredRes) } evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) - res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Empty(t, res) } func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 `) @@ -314,7 +315,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
res, err := ruleWithoutExternalLabels.Eval( - context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -328,7 +329,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { } res, err = ruleWithExternalLabels.Eval( - context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -345,7 +346,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { } func TestAlertingRuleExternalURLInTemplate(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 `) @@ -407,7 +408,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := ruleWithoutExternalURL.Eval( - context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -421,7 +422,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { } res, err = ruleWithExternalURL.Eval( - context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -438,7 +439,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { } func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 `) @@ -476,7 +477,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
res, err := rule.Eval( - context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -492,7 +493,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { } func TestAlertingRuleQueryInTemplate(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 70 85 70 70 `) @@ -543,7 +544,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }}; close(getDoneCh) }() _, err = ruleWithQueryInTemplate.Eval( - context.TODO(), evalTime, slowQueryFunc, nil, 0, + context.TODO(), 0, evalTime, slowQueryFunc, nil, 0, ) require.NoError(t, err) } @@ -595,13 +596,13 @@ func TestAlertingRuleDuplicate(t *testing.T) { "", true, log.NewNopLogger(), ) - _, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0) + _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0) require.Error(t, err) require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels") } func TestAlertingRuleLimit(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m metric{label="1"} 1 metric{label="2"} 1 @@ -643,7 +644,7 @@ func TestAlertingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { + switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { case err != nil: require.EqualError(t, err, test.err) case test.err != "": @@ -710,19 +711,17 @@ func TestQueryForStateSeries(t *testing.T) { labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, ) - alert := &Alert{ - State: 0, - Labels: labels.EmptyLabels(), - Annotations: labels.EmptyLabels(), - Value: 0, - ActiveAt: time.Time{}, - FiredAt: time.Time{}, - ResolvedAt: time.Time{}, - LastSentAt: time.Time{}, - ValidUntil: time.Time{}, - } + sample := rule.forStateSample(nil, time.Time{}, 0) + + seriesSet, err := rule.QueryForStateSeries(context.Background(), querier) - series, err := rule.QueryforStateSeries(context.Background(), alert, querier) + var series storage.Series + for seriesSet.Next() { + if seriesSet.At().Labels().Len() == sample.Metric.Len() { + series = seriesSet.At() + break + } + } require.Equal(t, tst.expectedSeries, series) require.Equal(t, tst.expectedError, err) @@ -785,7 +784,7 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) { } func TestKeepFiringFor(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 85 70 70 10x5 `) @@ -872,7 +871,7 @@ func TestKeepFiringFor(t *testing.T) { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) result[0].T = timestamp.FromTime(evalTime) - res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
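Note on the updated Eval signature threaded through the tests above: the new leading duration is the query offset, and the evaluation timestamp follows it. A minimal, self-contained sketch of the new call shape, assuming only the signatures visible in this diff (the rule name, expression, and no-op query function below are illustrative placeholders, not code from these tests):

package main

import (
	"context"
	"time"

	"github.com/go-kit/log"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/prometheus/prometheus/rules"
)

func main() {
	expr, err := parser.ParseExpr(`up == 0`)
	if err != nil {
		panic(err)
	}
	rule := rules.NewAlertingRule(
		"InstanceDown", expr, 5*time.Minute, 0,
		labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(),
		"", true, log.NewNopLogger(),
	)
	// A no-op query function; a real caller would use rules.EngineQueryFunc.
	queryFn := func(ctx context.Context, qs string, ts time.Time) (promql.Vector, error) {
		return nil, nil
	}
	// The rule is evaluated "as of" now, while the underlying PromQL query
	// runs one minute in the past (Eval queries at ts.Add(-queryOffset)).
	_, _ = rule.Eval(context.Background(), time.Minute, time.Now(), queryFn, nil, 0)
}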
@@ -889,13 +888,13 @@ func TestKeepFiringFor(t *testing.T) { testutil.RequireEqual(t, result, filteredRes) } evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) - res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Empty(t, res) } func TestPendingAndKeepFiringFor(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m http_requests{job="app-server", instance="0"} 75 10x10 `) @@ -926,7 +925,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { baseTime := time.Unix(0, 0) result.T = timestamp.FromTime(baseTime) - res, err := rule.Eval(context.TODO(), baseTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Len(t, res, 2) @@ -941,7 +940,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { } evalTime := baseTime.Add(time.Minute) - res, err = rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) require.Empty(t, res) } @@ -975,7 +974,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) { true, log.NewNopLogger(), ) - _, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { + _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { detail = FromOriginContext(ctx) return nil, nil }, nil, 0) @@ -1025,3 +1024,24 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) { rule.SetNoDependencyRules(true) require.True(t, rule.NoDependencyRules()) } + +func TestAlertingRule_ActiveAlertsCount(t *testing.T) { + rule := NewAlertingRule( + "TestRule", + nil, + time.Minute, + 0, + labels.FromStrings("severity", "critical"), + labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, + ) + + require.Equal(t, 0, rule.ActiveAlertsCount()) + + // Set an active alert. + lbls := labels.FromStrings("a1", "1") + h := lbls.Hash() + al := &Alert{State: StateFiring, Labels: lbls, ActiveAt: time.Now()} + rule.active[h] = al + + require.Equal(t, 1, rule.ActiveAlertsCount()) +} diff --git a/rules/group.go b/rules/group.go index c268d2df7d9..c0ad18c187f 100644 --- a/rules/group.go +++ b/rules/group.go @@ -47,6 +47,7 @@ type Group struct { name string file string interval time.Duration + queryOffset *time.Duration limit int rules []Rule seriesInPreviousEval []map[string]labels.Labels // One per Rule. 
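Note on the queryOffset field added to Group above: it is a *time.Duration rather than a plain duration so that an unset value can be told apart from an explicit zero; a nil pointer falls through to the manager-wide default. A minimal standalone sketch of that resolution order, mirroring the Group.QueryOffset method introduced further down in this diff (the helper name and example values are illustrative):

package main

import (
	"fmt"
	"time"
)

// resolveQueryOffset mirrors Group.QueryOffset: an explicit per-group
// setting wins (even 0), then the manager-wide default, then zero.
func resolveQueryOffset(groupOffset *time.Duration, defaultOffset func() time.Duration) time.Duration {
	if groupOffset != nil {
		return *groupOffset
	}
	if defaultOffset != nil {
		return defaultOffset()
	}
	return 0
}

func main() {
	oneMinute := time.Minute
	fmt.Println(resolveQueryOffset(&oneMinute, nil))                                       // 1m0s: group setting wins.
	fmt.Println(resolveQueryOffset(nil, func() time.Duration { return 30 * time.Second })) // 30s: manager default.
	fmt.Println(resolveQueryOffset(nil, nil))                                              // 0s: nothing configured.
}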
@@ -90,6 +91,7 @@ type GroupOptions struct { Rules []Rule ShouldRestore bool Opts *ManagerOptions + QueryOffset *time.Duration done chan struct{} EvalIterationFunc GroupEvalIterationFunc } @@ -126,6 +128,7 @@ func NewGroup(o GroupOptions) *Group { name: o.Name, file: o.File, interval: o.Interval, + queryOffset: o.QueryOffset, limit: o.Limit, rules: o.Rules, shouldRestore: o.ShouldRestore, @@ -230,7 +233,11 @@ func (g *Group) run(ctx context.Context) { g.evalIterationFunc(ctx, g, evalTimestamp) } - g.RestoreForState(time.Now()) + restoreStartTime := time.Now() + g.RestoreForState(restoreStartTime) + totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds() + g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds) + level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) g.shouldRestore = false } @@ -439,6 +446,8 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { wg sync.WaitGroup ) + ruleQueryOffset := g.QueryOffset() + for i, rule := range g.rules { select { case <-g.done: @@ -469,7 +478,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) + vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) if err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) @@ -558,7 +567,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { for metric, lset := range g.seriesInPreviousEval[i] { if _, ok := seriesReturned[metric]; !ok { // Series no longer exposed, mark it stale. - _, err = app.Append(0, lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) + _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) if unwrappedErr == nil { unwrappedErr = err @@ -597,14 +606,27 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { g.cleanupStaleSeries(ctx, ts) } +func (g *Group) QueryOffset() time.Duration { + if g.queryOffset != nil { + return *g.queryOffset + } + + if g.opts.DefaultRuleQueryOffset != nil { + return g.opts.DefaultRuleQueryOffset() + } + + return time.Duration(0) +} + func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { if len(g.staleSeries) == 0 { return } app := g.opts.Appendable.Appender(ctx) + queryOffset := g.QueryOffset() for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. - _, err := app.Append(0, s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN)) + _, err := app.Append(0, s, timestamp.FromTime(ts.Add(-queryOffset)), math.Float64frombits(value.StaleNaN)) unwrappedErr := errors.Unwrap(err) if unwrappedErr == nil { unwrappedErr = err @@ -660,25 +682,40 @@ func (g *Group) RestoreForState(ts time.Time) { continue } + sset, err := alertRule.QueryForStateSeries(g.opts.Context, q) + if err != nil { + level.Error(g.logger).Log( + "msg", "Failed to restore 'for' state", + labels.AlertName, alertRule.Name(), + "stage", "Select", + "err", err, + ) + // Even if we fail to query the `ALERTS_FOR_STATE` series, we currently have no way to retry the restore process. + // So the best we can do is mark the rule as restored and let it eventually fire.
+ alertRule.SetRestored(true) + continue + } + + // While not technically the number of series we expect, it's as good an approximation as any. + seriesByLabels := make(map[string]storage.Series, alertRule.ActiveAlertsCount()) + for sset.Next() { + seriesByLabels[sset.At().Labels().DropMetricName().String()] = sset.At() + } + + // No results for this alert rule. + if len(seriesByLabels) == 0 { + level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) + alertRule.SetRestored(true) + continue + } + alertRule.ForEachActiveAlert(func(a *Alert) { var s storage.Series - s, err := alertRule.QueryforStateSeries(g.opts.Context, a, q) - if err != nil { - // Querier Warnings are ignored. We do not care unless we have an error. - level.Error(g.logger).Log( - "msg", "Failed to restore 'for' state", - labels.AlertName, alertRule.Name(), - "stage", "Select", - "err", err, - ) - return - } - - if s == nil { + s, ok := seriesByLabels[a.Labels.String()] + if !ok { return } - // Series found for the 'for' state. var t int64 var v float64 @@ -756,6 +793,10 @@ func (g *Group) Equals(ng *Group) bool { return false } + if ((g.queryOffset == nil) != (ng.queryOffset == nil)) || (g.queryOffset != nil && ng.queryOffset != nil && *g.queryOffset != *ng.queryOffset) { + return false + } + if len(g.rules) != len(ng.rules) { return false } @@ -779,17 +820,18 @@ const namespace = "prometheus" // Metrics for rule evaluation. type Metrics struct { - EvalDuration prometheus.Summary - IterationDuration prometheus.Summary - IterationsMissed *prometheus.CounterVec - IterationsScheduled *prometheus.CounterVec - EvalTotal *prometheus.CounterVec - EvalFailures *prometheus.CounterVec - GroupInterval *prometheus.GaugeVec - GroupLastEvalTime *prometheus.GaugeVec - GroupLastDuration *prometheus.GaugeVec - GroupRules *prometheus.GaugeVec - GroupSamples *prometheus.GaugeVec + EvalDuration prometheus.Summary + IterationDuration prometheus.Summary + IterationsMissed *prometheus.CounterVec + IterationsScheduled *prometheus.CounterVec + EvalTotal *prometheus.CounterVec + EvalFailures *prometheus.CounterVec + GroupInterval *prometheus.GaugeVec + GroupLastEvalTime *prometheus.GaugeVec + GroupLastDuration *prometheus.GaugeVec + GroupLastRestoreDuration *prometheus.GaugeVec + GroupRules *prometheus.GaugeVec + GroupSamples *prometheus.GaugeVec } // NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer, @@ -865,6 +907,14 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), + GroupLastRestoreDuration: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "rule_group_last_restore_duration_seconds", + Help: "The duration of the last alert rule state restoration using the `ALERTS_FOR_STATE` series.", + }, + []string{"rule_group"}, + ), GroupRules: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, @@ -894,6 +944,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { m.GroupInterval, m.GroupLastEvalTime, m.GroupLastDuration, + m.GroupLastRestoreDuration, m.GroupRules, m.GroupSamples, ) diff --git a/rules/group_test.go b/rules/group_test.go new file mode 100644 index 00000000000..ff1ef3d6c1e --- /dev/null +++ b/rules/group_test.go @@ -0,0 +1,98 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestGroup_Equals(t *testing.T) { + tests := map[string]struct { + first *Group + second *Group + expected bool + }{ + "no query offset set on both groups": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + }, + expected: true, + }, + "query offset set only on the first group": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + }, + expected: false, + }, + "query offset set on both groups to the same value": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + expected: true, + }, + "query offset set on both groups to different value": { + first: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](time.Minute), + }, + second: &Group{ + name: "group-1", + file: "file-1", + interval: time.Minute, + queryOffset: pointerOf[time.Duration](2 * time.Minute), + }, + expected: false, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + require.Equal(t, testData.expected, testData.first.Equals(testData.second)) + require.Equal(t, testData.expected, testData.second.Equals(testData.first)) + }) + } +} + +func pointerOf[T any](value T) *T { + return &value +} diff --git a/rules/manager.go b/rules/manager.go index 165dca144e6..063189e0ab1 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -116,6 +116,7 @@ type ManagerOptions struct { ForGracePeriod time.Duration ResendDelay time.Duration GroupLoader GroupLoader + DefaultRuleQueryOffset func() time.Duration MaxConcurrentEvals int64 ConcurrentEvalsEnabled bool RuleConcurrencyController RuleConcurrencyController @@ -336,6 +337,7 @@ func (m *Manager) LoadGroups( Rules: rules, ShouldRestore: shouldRestore, Opts: m.opts, + QueryOffset: (*time.Duration)(rg.QueryOffset), done: m.done, EvalIterationFunc: groupEvalIterationFunc, }) diff --git a/rules/manager_test.go b/rules/manager_test.go index 50ab6b861fd..11d1282bd34 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -16,9 +16,12 @@ package rules import ( "context" "fmt" + "io/fs" "math" "os" + "path" "sort" + "strconv" "sync" "testing" "time" @@ -38,6 +41,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" @@ -50,7 +54,7 @@ func TestMain(m *testing.M) { } func TestAlertingRule(t 
*testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 5m http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140 @@ -160,7 +164,7 @@ func TestAlertingRule(t *testing.T) { evalTime := baseTime.Add(test.time) - res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. @@ -190,152 +194,156 @@ func TestAlertingRule(t *testing.T) { } func TestForStateAddSamples(t *testing.T) { - storage := promql.LoadedStorage(t, ` + for _, queryOffset := range []time.Duration{0, time.Minute} { + t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) { + storage := promqltest.LoadedStorage(t, ` load 5m http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 95 105 105 95 85 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 80 90 100 110 120 130 140 `) - t.Cleanup(func() { storage.Close() }) - - expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) - require.NoError(t, err) - - rule := NewAlertingRule( - "HTTPRequestRateLow", - expr, - time.Minute, - 0, - labels.FromStrings("severity", "{{\"c\"}}ritical"), - labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, - ) - result := promql.Vector{ - promql.Sample{ - Metric: labels.FromStrings( - "__name__", "ALERTS_FOR_STATE", - "alertname", "HTTPRequestRateLow", - "group", "canary", - "instance", "0", - "job", "app-server", - "severity", "critical", - ), - F: 1, - }, - promql.Sample{ - Metric: labels.FromStrings( - "__name__", "ALERTS_FOR_STATE", - "alertname", "HTTPRequestRateLow", - "group", "canary", - "instance", "1", - "job", "app-server", - "severity", "critical", - ), - F: 1, - }, - promql.Sample{ - Metric: labels.FromStrings( - "__name__", "ALERTS_FOR_STATE", - "alertname", "HTTPRequestRateLow", - "group", "canary", - "instance", "0", - "job", "app-server", - "severity", "critical", - ), - F: 1, - }, - promql.Sample{ - Metric: labels.FromStrings( - "__name__", "ALERTS_FOR_STATE", - "alertname", "HTTPRequestRateLow", - "group", "canary", - "instance", "1", - "job", "app-server", - "severity", "critical", - ), - F: 1, - }, - } + t.Cleanup(func() { storage.Close() }) + + expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) + require.NoError(t, err) + + rule := NewAlertingRule( + "HTTPRequestRateLow", + expr, + time.Minute, + 0, + labels.FromStrings("severity", "{{\"c\"}}ritical"), + labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, + ) + result := promql.Vector{ + promql.Sample{ + Metric: labels.FromStrings( + "__name__", "ALERTS_FOR_STATE", + "alertname", "HTTPRequestRateLow", + "group", "canary", + "instance", "0", + "job", "app-server", + "severity", "critical", + ), + F: 1, + }, + promql.Sample{ + Metric: labels.FromStrings( + "__name__", "ALERTS_FOR_STATE", + "alertname", "HTTPRequestRateLow", + "group", "canary", + "instance", "1", + "job", "app-server", + "severity", "critical", + ), + F: 1, + }, + promql.Sample{ + Metric: labels.FromStrings( + "__name__", "ALERTS_FOR_STATE", + "alertname", "HTTPRequestRateLow", + "group", "canary", + 
"instance", "0", + "job", "app-server", + "severity", "critical", + ), + F: 1, + }, + promql.Sample{ + Metric: labels.FromStrings( + "__name__", "ALERTS_FOR_STATE", + "alertname", "HTTPRequestRateLow", + "group", "canary", + "instance", "1", + "job", "app-server", + "severity", "critical", + ), + F: 1, + }, + } - baseTime := time.Unix(0, 0) + baseTime := time.Unix(0, 0) + + tests := []struct { + time time.Duration + result promql.Vector + persistThisTime bool // If true, it means this 'time' is persisted for 'for'. + }{ + { + time: 0, + result: append(promql.Vector{}, result[:2]...), + persistThisTime: true, + }, + { + time: 5 * time.Minute, + result: append(promql.Vector{}, result[2:]...), + }, + { + time: 10 * time.Minute, + result: append(promql.Vector{}, result[2:3]...), + }, + { + time: 15 * time.Minute, + result: nil, + }, + { + time: 20 * time.Minute, + result: nil, + }, + { + time: 25 * time.Minute, + result: append(promql.Vector{}, result[:1]...), + persistThisTime: true, + }, + { + time: 30 * time.Minute, + result: append(promql.Vector{}, result[2:3]...), + }, + } - tests := []struct { - time time.Duration - result promql.Vector - persistThisTime bool // If true, it means this 'time' is persisted for 'for'. - }{ - { - time: 0, - result: append(promql.Vector{}, result[:2]...), - persistThisTime: true, - }, - { - time: 5 * time.Minute, - result: append(promql.Vector{}, result[2:]...), - }, - { - time: 10 * time.Minute, - result: append(promql.Vector{}, result[2:3]...), - }, - { - time: 15 * time.Minute, - result: nil, - }, - { - time: 20 * time.Minute, - result: nil, - }, - { - time: 25 * time.Minute, - result: append(promql.Vector{}, result[:1]...), - persistThisTime: true, - }, - { - time: 30 * time.Minute, - result: append(promql.Vector{}, result[2:3]...), - }, - } + var forState float64 + for i, test := range tests { + t.Logf("case %d", i) + evalTime := baseTime.Add(test.time).Add(queryOffset) - var forState float64 - for i, test := range tests { - t.Logf("case %d", i) - evalTime := baseTime.Add(test.time) + if test.persistThisTime { + forState = float64(evalTime.Unix()) + } + if test.result == nil { + forState = float64(value.StaleNaN) + } - if test.persistThisTime { - forState = float64(evalTime.Unix()) - } - if test.result == nil { - forState = float64(value.StaleNaN) - } + res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + require.NoError(t, err) + + var filteredRes promql.Vector // After removing 'ALERTS' samples. + for _, smpl := range res { + smplName := smpl.Metric.Get("__name__") + if smplName == "ALERTS_FOR_STATE" { + filteredRes = append(filteredRes, smpl) + } else { + // If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'. + require.Equal(t, "ALERTS", smplName) + } + } + for i := range test.result { + test.result[i].T = timestamp.FromTime(evalTime.Add(-queryOffset)) + // Updating the expected 'for' state. + if test.result[i].F >= 0 { + test.result[i].F = forState + } + } + require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res)) - res, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, 0) - require.NoError(t, err) + sort.Slice(filteredRes, func(i, j int) bool { + return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0 + }) + prom_testutil.RequireEqual(t, test.result, filteredRes) - var filteredRes promql.Vector // After removing 'ALERTS' samples. 
- for _, smpl := range res { - smplName := smpl.Metric.Get("__name__") - if smplName == "ALERTS_FOR_STATE" { - filteredRes = append(filteredRes, smpl) - } else { - // If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'. - require.Equal(t, "ALERTS", smplName) - } - } - for i := range test.result { - test.result[i].T = timestamp.FromTime(evalTime) - // Updating the expected 'for' state. - if test.result[i].F >= 0 { - test.result[i].F = forState + for _, aa := range rule.ActiveAlerts() { + require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) + } } - } - require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res)) - - sort.Slice(filteredRes, func(i, j int) bool { - return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0 }) - prom_testutil.RequireEqual(t, test.result, filteredRes) - - for _, aa := range rule.ActiveAlerts() { - require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) - } } } @@ -347,242 +355,251 @@ func sortAlerts(items []*Alert) { } func TestForStateRestore(t *testing.T) { - storage := promql.LoadedStorage(t, ` + for _, queryOffset := range []time.Duration{0, time.Minute} { + t.Run(fmt.Sprintf("queryOffset %s", queryOffset.String()), func(t *testing.T) { + storage := promqltest.LoadedStorage(t, ` load 5m http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"} 75 85 50 0 0 25 0 0 40 0 120 http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"} 125 90 60 0 0 25 0 0 40 0 130 `) - t.Cleanup(func() { storage.Close() }) - - expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) - require.NoError(t, err) - - opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(testEngine, storage), - Appendable: storage, - Queryable: storage, - Context: context.Background(), - Logger: log.NewNopLogger(), - NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, - OutageTolerance: 30 * time.Minute, - ForGracePeriod: 10 * time.Minute, - } - - alertForDuration := 25 * time.Minute - // Initial run before prometheus goes down. - rule := NewAlertingRule( - "HTTPRequestRateLow", - expr, - alertForDuration, - 0, - labels.FromStrings("severity", "critical"), - labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, - ) + t.Cleanup(func() { storage.Close() }) + + expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) + require.NoError(t, err) + + opts := &ManagerOptions{ + QueryFunc: EngineQueryFunc(testEngine, storage), + Appendable: storage, + Queryable: storage, + Context: context.Background(), + Logger: log.NewNopLogger(), + NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, + OutageTolerance: 30 * time.Minute, + ForGracePeriod: 10 * time.Minute, + } - group := NewGroup(GroupOptions{ - Name: "default", - Interval: time.Second, - Rules: []Rule{rule}, - ShouldRestore: true, - Opts: opts, - }) - groups := make(map[string]*Group) - groups["default;"] = group + alertForDuration := 25 * time.Minute + // Initial run before prometheus goes down. 
+ rule := NewAlertingRule( + "HTTPRequestRateLow", + expr, + alertForDuration, + 0, + labels.FromStrings("severity", "critical"), + labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil, + ) + + group := NewGroup(GroupOptions{ + Name: "default", + Interval: time.Second, + Rules: []Rule{rule}, + ShouldRestore: true, + Opts: opts, + }) + groups := make(map[string]*Group) + groups["default;"] = group - initialRuns := []time.Duration{0, 5 * time.Minute} + initialRuns := []time.Duration{0, 5 * time.Minute} - baseTime := time.Unix(0, 0) - for _, duration := range initialRuns { - evalTime := baseTime.Add(duration) - group.Eval(context.TODO(), evalTime) - } + baseTime := time.Unix(0, 0) + for _, duration := range initialRuns { + evalTime := baseTime.Add(duration) + group.Eval(context.TODO(), evalTime) + } - exp := rule.ActiveAlerts() - for _, aa := range exp { - require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) - } - sort.Slice(exp, func(i, j int) bool { - return labels.Compare(exp[i].Labels, exp[j].Labels) < 0 - }) + // Prometheus goes down here. We create new rules and groups. + type testInput struct { + name string + restoreDuration time.Duration + expectedAlerts []*Alert + + num int + noRestore bool + gracePeriod bool + downDuration time.Duration + before func() + } - // Prometheus goes down here. We create new rules and groups. - type testInput struct { - restoreDuration time.Duration - alerts []*Alert + tests := []testInput{ + { + name: "normal restore (alerts were not firing)", + restoreDuration: 15 * time.Minute, + expectedAlerts: rule.ActiveAlerts(), + downDuration: 10 * time.Minute, + }, + { + name: "outage tolerance", + restoreDuration: 40 * time.Minute, + noRestore: true, + num: 2, + }, + { + name: "no active alerts", + restoreDuration: 50 * time.Minute, + expectedAlerts: []*Alert{}, + }, + { + name: "test the grace period", + restoreDuration: 25 * time.Minute, + expectedAlerts: []*Alert{}, + gracePeriod: true, + before: func() { + for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} { + evalTime := baseTime.Add(duration) + group.Eval(context.TODO(), evalTime) + } + }, + num: 2, + }, + } - num int - noRestore bool - gracePeriod bool - downDuration time.Duration + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.before != nil { + tt.before() + } + + newRule := NewAlertingRule( + "HTTPRequestRateLow", + expr, + alertForDuration, + 0, + labels.FromStrings("severity", "critical"), + labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil, + ) + newGroup := NewGroup(GroupOptions{ + Name: "default", + Interval: time.Second, + Rules: []Rule{newRule}, + ShouldRestore: true, + Opts: opts, + QueryOffset: &queryOffset, + }) + + newGroups := make(map[string]*Group) + newGroups["default;"] = newGroup + + restoreTime := baseTime.Add(tt.restoreDuration).Add(queryOffset) + // First eval before restoration. + newGroup.Eval(context.TODO(), restoreTime) + // Restore happens here. + newGroup.RestoreForState(restoreTime) + + got := newRule.ActiveAlerts() + for _, aa := range got { + require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) + } + sort.Slice(got, func(i, j int) bool { + return labels.Compare(got[i].Labels, got[j].Labels) < 0 + }) + + // In all cases, we expect the restoration process to have completed. 
+ require.Truef(t, newRule.Restored(), "expected the rule restoration process to have completed") + + // Checking if we have restored it correctly. + switch { + case tt.noRestore: + require.Len(t, got, tt.num) + for _, e := range got { + require.Equal(t, e.ActiveAt, restoreTime) + } + case tt.gracePeriod: + + require.Len(t, got, tt.num) + for _, e := range got { + require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) + } + default: + exp := tt.expectedAlerts + require.Equal(t, len(exp), len(got)) + sortAlerts(exp) + sortAlerts(got) + for i, e := range exp { + require.Equal(t, e.Labels, got[i].Labels) + + // Difference in time should be within 1e6 ns, i.e. 1ms + // (due to conversion between ns & ms, float64 & int64). + activeAtDiff := queryOffset.Seconds() + float64(e.ActiveAt.Unix()+int64(tt.downDuration/time.Second)-got[i].ActiveAt.Unix()) + require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong") + } + } + }) + } + }) } +} - tests := []testInput{ - { - // Normal restore (alerts were not firing). - restoreDuration: 15 * time.Minute, - alerts: rule.ActiveAlerts(), - downDuration: 10 * time.Minute, - }, - { - // Testing Outage Tolerance. - restoreDuration: 40 * time.Minute, - noRestore: true, - num: 2, - }, - { - // No active alerts. - restoreDuration: 50 * time.Minute, - alerts: []*Alert{}, - }, - } +func TestStaleness(t *testing.T) { + for _, queryOffset := range []time.Duration{0, time.Minute} { + st := teststorage.New(t) + defer st.Close() + engineOpts := promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10, + Timeout: 10 * time.Second, + } + engine := promql.NewEngine(engineOpts) + opts := &ManagerOptions{ + QueryFunc: EngineQueryFunc(engine, st), + Appendable: st, + Queryable: st, + Context: context.Background(), + Logger: log.NewNopLogger(), + } - testFunc := func(tst testInput) { - newRule := NewAlertingRule( - "HTTPRequestRateLow", - expr, - alertForDuration, - 0, - labels.FromStrings("severity", "critical"), - labels.EmptyLabels(), labels.EmptyLabels(), "", false, nil, - ) - newGroup := NewGroup(GroupOptions{ + expr, err := parser.ParseExpr("a + 1") + require.NoError(t, err) + rule := NewRecordingRule("a_plus_one", expr, labels.Labels{}) + group := NewGroup(GroupOptions{ Name: "default", Interval: time.Second, - Rules: []Rule{newRule}, + Rules: []Rule{rule}, ShouldRestore: true, Opts: opts, + QueryOffset: &queryOffset, }) - newGroups := make(map[string]*Group) - newGroups["default;"] = newGroup - - restoreTime := baseTime.Add(tst.restoreDuration) - // First eval before restoration. - newGroup.Eval(context.TODO(), restoreTime) - // Restore happens here. - newGroup.RestoreForState(restoreTime) - - got := newRule.ActiveAlerts() - for _, aa := range got { - require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels) - } - sort.Slice(got, func(i, j int) bool { - return labels.Compare(got[i].Labels, got[j].Labels) < 0 - }) - - // Checking if we have restored it correctly. 
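
The restore assertions above encode three distinct outcomes. Roughly, as a self-contained check (a sketch of what `Group.RestoreForState` guarantees as observed by this test, not its implementation; parameter names are illustrative):

```go
package sketch

import "time"

// checkRestore encodes the three invariants the test asserts.
func checkRestore(gotActiveAt, oldActiveAt, restoreTime time.Time,
	holdFor, grace, queryOffset, down, tolerance time.Duration) bool {
	switch {
	case down > tolerance:
		// Outage tolerance exceeded: the alert starts over at restore time.
		return gotActiveAt.Equal(restoreTime)
	case oldActiveAt.Add(holdFor).Before(restoreTime.Add(grace)):
		// Grace period: firing is pushed out to restoreTime + grace.
		return gotActiveAt.Add(holdFor).Sub(restoreTime) == grace
	default:
		// Normal restore: ActiveAt shifts forward by the downtime (plus
		// the query offset), preserving the time left until firing.
		return gotActiveAt.Equal(oldActiveAt.Add(down).Add(queryOffset))
	}
}
```
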
- switch { - case tst.noRestore: - require.Len(t, got, tst.num) - for _, e := range got { - require.Equal(t, e.ActiveAt, restoreTime) - } - case tst.gracePeriod: - require.Len(t, got, tst.num) - for _, e := range got { - require.Equal(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime)) - } - default: - exp := tst.alerts - require.Equal(t, len(exp), len(got)) - sortAlerts(exp) - sortAlerts(got) - for i, e := range exp { - require.Equal(t, e.Labels, got[i].Labels) - - // Difference in time should be within 1e6 ns, i.e. 1ms - // (due to conversion between ns & ms, float64 & int64). - activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix()) - require.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong") - } - } - } - - for _, tst := range tests { - testFunc(tst) - } - - // Testing the grace period. - for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} { - evalTime := baseTime.Add(duration) - group.Eval(context.TODO(), evalTime) - } - testFunc(testInput{ - restoreDuration: 25 * time.Minute, - alerts: []*Alert{}, - gracePeriod: true, - num: 2, - }) -} - -func TestStaleness(t *testing.T) { - st := teststorage.New(t) - defer st.Close() - engineOpts := promql.EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10, - Timeout: 10 * time.Second, - } - engine := promql.NewEngine(engineOpts) - opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(engine, st), - Appendable: st, - Queryable: st, - Context: context.Background(), - Logger: log.NewNopLogger(), - } - - expr, err := parser.ParseExpr("a + 1") - require.NoError(t, err) - rule := NewRecordingRule("a_plus_one", expr, labels.Labels{}) - group := NewGroup(GroupOptions{ - Name: "default", - Interval: time.Second, - Rules: []Rule{rule}, - ShouldRestore: true, - Opts: opts, - }) + // A time series that has two samples and then goes stale. + app := st.Appender(context.Background()) + app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1) + app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2) + app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN)) - // A time series that has two samples and then goes stale. - app := st.Appender(context.Background()) - app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 0, 1) - app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2) - app.Append(0, labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN)) + err = app.Commit() + require.NoError(t, err) - err = app.Commit() - require.NoError(t, err) + ctx := context.Background() - ctx := context.Background() + // Execute 3 times, 1 second apart. + group.Eval(ctx, time.Unix(0, 0).Add(queryOffset)) + group.Eval(ctx, time.Unix(1, 0).Add(queryOffset)) + group.Eval(ctx, time.Unix(2, 0).Add(queryOffset)) - // Execute 3 times, 1 second apart. 
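
For readers unfamiliar with the staleness machinery exercised in this test: stale markers are a reserved NaN bit pattern, and NaN's self-inequality is why the test swaps the third sample's value for 42 before `require.Equal`. A quick round trip using the real `model/value` helpers:

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/model/value"
)

func main() {
	v := math.Float64frombits(value.StaleNaN) // encode a stale marker
	fmt.Println(value.IsStaleNaN(v))          // true
	fmt.Println(v == v)                       // false: NaN != NaN, hence the F=42 swap
}
```
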
- group.Eval(ctx, time.Unix(0, 0)) - group.Eval(ctx, time.Unix(1, 0)) - group.Eval(ctx, time.Unix(2, 0)) + querier, err := st.Querier(0, 2000) + require.NoError(t, err) + defer querier.Close() - querier, err := st.Querier(0, 2000) - require.NoError(t, err) - defer querier.Close() + matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one") + require.NoError(t, err) - matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one") - require.NoError(t, err) + set := querier.Select(ctx, false, nil, matcher) + samples, err := readSeriesSet(set) + require.NoError(t, err) - set := querier.Select(ctx, false, nil, matcher) - samples, err := readSeriesSet(set) - require.NoError(t, err) + metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String() + metricSample, ok := samples[metric] - metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String() - metricSample, ok := samples[metric] + require.True(t, ok, "Series %s not returned.", metric) + require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F)) + metricSample[2].F = 42 // require.Equal cannot handle NaN. - require.True(t, ok, "Series %s not returned.", metric) - require.True(t, value.IsStaleNaN(metricSample[2].F), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].F)) - metricSample[2].F = 42 // require.Equal cannot handle NaN. + want := map[string][]promql.FPoint{ + metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}}, + } - want := map[string][]promql.FPoint{ - metric: {{T: 0, F: 2}, {T: 1000, F: 3}, {T: 2000, F: 42}}, + require.Equal(t, want, samples) } - - require.Equal(t, want, samples) } // Convert a SeriesSet into a form usable with require.Equal. @@ -606,6 +623,46 @@ func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.FPoint, error) { return result, ss.Err() } +func TestGroup_QueryOffset(t *testing.T) { + config := ` +groups: + - name: group1 + query_offset: 2m + - name: group2 + query_offset: 0s + - name: group3 +` + + dir := t.TempDir() + fname := path.Join(dir, "rules.yaml") + err := os.WriteFile(fname, []byte(config), fs.ModePerm) + require.NoError(t, err) + + m := NewManager(&ManagerOptions{ + Logger: log.NewNopLogger(), + DefaultRuleQueryOffset: func() time.Duration { + return time.Minute + }, + }) + m.start() + err = m.Update(time.Second, []string{fname}, labels.EmptyLabels(), "", nil) + require.NoError(t, err) + + rgs := m.RuleGroups() + sort.Slice(rgs, func(i, j int) bool { + return rgs[i].Name() < rgs[j].Name() + }) + + // From config. + require.Equal(t, 2*time.Minute, rgs[0].QueryOffset()) + // Setting 0 in config is detected. + require.Equal(t, time.Duration(0), rgs[1].QueryOffset()) + // Default when nothing is set. 
+ require.Equal(t, time.Minute, rgs[2].QueryOffset()) + + m.Stop() +} + func TestCopyState(t *testing.T) { oldGroup := &Group{ rules: []Rule{ @@ -1231,7 +1288,7 @@ func TestRuleHealthUpdates(t *testing.T) { } func TestRuleGroupEvalIterationFunc(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 5m http_requests{instance="0"} 75 85 50 0 0 25 0 0 40 0 120 `) @@ -1359,7 +1416,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { ts := time.Now() app := db.Appender(context.Background()) for i, h := range hists { - l := labels.FromStrings("__name__", "histogram_metric", "idx", fmt.Sprintf("%d", i)) + l := labels.FromStrings("__name__", "histogram_metric", "idx", strconv.Itoa(i)) _, err := app.AppendHistogram(0, l, ts.UnixMilli(), h.Copy(), nil) require.NoError(t, err) } @@ -2041,7 +2098,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) { require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount)) } -const artificialDelay = 10 * time.Millisecond +const artificialDelay = 15 * time.Millisecond func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions { var inflightMu sync.Mutex diff --git a/rules/origin_test.go b/rules/origin_test.go index ca466301ddb..75c83f9a4e9 100644 --- a/rules/origin_test.go +++ b/rules/origin_test.go @@ -31,7 +31,7 @@ type unknownRule struct{} func (u unknownRule) Name() string { return "" } func (u unknownRule) Labels() labels.Labels { return labels.EmptyLabels() } -func (u unknownRule) Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) { +func (u unknownRule) Eval(context.Context, time.Duration, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) { return nil, nil } func (u unknownRule) String() string { return "" } diff --git a/rules/recording.go b/rules/recording.go index e2b0a31a032..17a75fdd1a3 100644 --- a/rules/recording.go +++ b/rules/recording.go @@ -77,10 +77,9 @@ func (rule *RecordingRule) Labels() labels.Labels { } // Eval evaluates the rule and then overrides the metric names and labels accordingly. 
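
`TestGroup_QueryOffset` above fixes the precedence that feeds the new `queryOffset` parameter of `Eval` (see the `rules/recording.go` hunk just below): a per-group `query_offset` wins, an explicit `0s` counts as set, and only an absent field falls back to the manager-wide `DefaultRuleQueryOffset`. A plausible reconstruction of that accessor; the real one lives in `rules/group.go`, outside this excerpt:

```go
package sketch

import "time"

// queryOffset reconstructs the precedence TestGroup_QueryOffset pins down.
// A *time.Duration distinguishes "unset" from an explicit 0s, which is why
// group2's `query_offset: 0s` beats the manager-wide default.
func queryOffset(groupOffset *time.Duration, defaultOffset func() time.Duration) time.Duration {
	if groupOffset != nil {
		return *groupOffset // per-group setting wins, including 0s
	}
	if defaultOffset != nil {
		return defaultOffset() // manager-wide DefaultRuleQueryOffset
	}
	return 0
}
```
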
-func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) { +func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) { ctx = NewOriginContext(ctx, NewRuleDetail(rule)) - - vector, err := query(ctx, rule.vector.String(), ts) + vector, err := query(ctx, rule.vector.String(), ts.Add(-queryOffset)) if err != nil { return nil, err } diff --git a/rules/recording_test.go b/rules/recording_test.go index 24b7d653902..fdddd4e02e0 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -111,7 +112,7 @@ var ruleEvalTestScenarios = []struct { } func setUpRuleEvalTest(t require.TestingT) *teststorage.TestStorage { - return promql.LoadedStorage(t, ` + return promqltest.LoadedStorage(t, ` load 1m metric{label_a="1",label_b="3"} 1 metric{label_a="2",label_b="4"} 10 @@ -125,7 +126,7 @@ func TestRuleEval(t *testing.T) { for _, scenario := range ruleEvalTestScenarios { t.Run(scenario.name, func(t *testing.T) { rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) - result, err := rule.Eval(context.TODO(), ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) + result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) require.NoError(t, err) testutil.RequireEqual(t, scenario.expected, result) }) @@ -143,7 +144,7 @@ func BenchmarkRuleEval(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := rule.Eval(context.TODO(), ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) + _, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) if err != nil { require.NoError(b, err) } @@ -172,13 +173,13 @@ func TestRuleEvalDuplicate(t *testing.T) { expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`) rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test")) - _, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0) + _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0) require.Error(t, err) require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels") } func TestRecordingRuleLimit(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m metric{label="1"} 1 metric{label="2"} 1 @@ -214,7 +215,7 @@ func TestRecordingRuleLimit(t *testing.T) { evalTime := time.Unix(0, 0) for _, test := range tests { - switch _, err := rule.Eval(context.TODO(), evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { + switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { case err != nil: require.EqualError(t, err, test.err) case test.err != "": @@ -242,7 +243,7 @@ func TestRecordingEvalWithOrigin(t *testing.T) { require.NoError(t, err) rule := NewRecordingRule(name, expr, lbs) - _, err = rule.Eval(ctx, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { + _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs 
string, _ time.Time) (promql.Vector, error) { detail = FromOriginContext(ctx) return nil, nil }, nil, 0) diff --git a/rules/rule.go b/rules/rule.go index 59af3e0bbaa..687c03d000d 100644 --- a/rules/rule.go +++ b/rules/rule.go @@ -40,7 +40,7 @@ type Rule interface { // Labels of the rule. Labels() labels.Labels // Eval evaluates the rule, including any associated recording or alerting actions. - Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error) + Eval(ctx context.Context, queryOffset time.Duration, evaluationTime time.Time, queryFunc QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) // String returns a human-readable string representation of the rule. String() string // Query returns the rule query expression. diff --git a/scrape/manager.go b/scrape/manager.go index a7a8b828e59..cb92db5a8c0 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -81,6 +81,8 @@ type Options struct { // Option to enable the ingestion of the created timestamp as a synthetic zero sample. // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md EnableCreatedTimestampZeroIngestion bool + // Option to enable the ingestion of native histograms. + EnableNativeHistogramsIngestion bool // Optional HTTP client options to use when scraping. HTTPClientOptions []config_util.HTTPClientOption diff --git a/scrape/scrape.go b/scrape/scrape.go index 4bbeab57a76..c285f05e36a 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -178,6 +178,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.interval, opts.timeout, opts.scrapeClassicHistograms, + options.EnableNativeHistogramsIngestion, options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, options.EnableMetadataStorage, @@ -827,7 +828,10 @@ type scrapeLoop struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool - enableCTZeroIngestion bool + + // Feature flagged options. 
+ enableNativeHistogramIngestion bool + enableCTZeroIngestion bool appender func(ctx context.Context) storage.Appender symbolTable *labels.SymbolTable @@ -1123,6 +1127,7 @@ func newScrapeLoop(ctx context.Context, interval time.Duration, timeout time.Duration, scrapeClassicHistograms bool, + enableNativeHistogramIngestion bool, enableCTZeroIngestion bool, reportExtraMetrics bool, appendMetadataToWAL bool, @@ -1153,33 +1158,34 @@ func newScrapeLoop(ctx context.Context, } sl := &scrapeLoop{ - scraper: sc, - buffers: buffers, - cache: cache, - appender: appender, - symbolTable: symbolTable, - sampleMutator: sampleMutator, - reportSampleMutator: reportSampleMutator, - stopped: make(chan struct{}), - offsetSeed: offsetSeed, - l: l, - parentCtx: ctx, - appenderCtx: appenderCtx, - honorTimestamps: honorTimestamps, - trackTimestampsStaleness: trackTimestampsStaleness, - enableCompression: enableCompression, - sampleLimit: sampleLimit, - bucketLimit: bucketLimit, - maxSchema: maxSchema, - labelLimits: labelLimits, - interval: interval, - timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, - enableCTZeroIngestion: enableCTZeroIngestion, - reportExtraMetrics: reportExtraMetrics, - appendMetadataToWAL: appendMetadataToWAL, - metrics: metrics, - skipOffsetting: skipOffsetting, + scraper: sc, + buffers: buffers, + cache: cache, + appender: appender, + symbolTable: symbolTable, + sampleMutator: sampleMutator, + reportSampleMutator: reportSampleMutator, + stopped: make(chan struct{}), + offsetSeed: offsetSeed, + l: l, + parentCtx: ctx, + appenderCtx: appenderCtx, + honorTimestamps: honorTimestamps, + trackTimestampsStaleness: trackTimestampsStaleness, + enableCompression: enableCompression, + sampleLimit: sampleLimit, + bucketLimit: bucketLimit, + maxSchema: maxSchema, + labelLimits: labelLimits, + interval: interval, + timeout: timeout, + scrapeClassicHistograms: scrapeClassicHistograms, + enableNativeHistogramIngestion: enableNativeHistogramIngestion, + enableCTZeroIngestion: enableCTZeroIngestion, + reportExtraMetrics: reportExtraMetrics, + appendMetadataToWAL: appendMetadataToWAL, + metrics: metrics, + skipOffsetting: skipOffsetting, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1627,7 +1633,7 @@ loop: } } - if isHistogram { + if isHistogram && sl.enableNativeHistogramIngestion { if h != nil { ref, err = app.AppendHistogram(ref, lset, t, h, nil) } else { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 20b21936b9c..b5a31cb650b 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -678,6 +678,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app false, false, false, + false, nil, false, newTestScrapeMetrics(t), @@ -819,6 +820,7 @@ func TestScrapeLoopRun(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, @@ -962,6 +964,7 @@ func TestScrapeLoopMetadata(t *testing.T) { false, false, false, + false, nil, false, scrapeMetrics, @@ -1282,7 +1285,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { for i := 0; i < 500; i++ { s = fmt.Sprintf("%smetric_%d_%d 42\n", s, i, numScrapes) } - w.Write([]byte(fmt.Sprintf(s + "&"))) + w.Write([]byte(s + "&")) } else { cancel() } @@ -1571,6 +1574,7 @@ func TestScrapeLoop_HistogramBucketLimit(t *testing.T) { app := &bucketLimitAppender{Appender: resApp, limit: 2} sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.enableNativeHistogramIngestion = true sl.sampleMutator = func(l 
labels.Labels) labels.Labels { if l.Has("deleteme") { return labels.EmptyLabels() @@ -1797,14 +1801,15 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) { func TestScrapeLoopAppendExemplar(t *testing.T) { tests := []struct { - title string - scrapeClassicHistograms bool - scrapeText string - contentType string - discoveryLabels []string - floats []floatSample - histograms []histogramSample - exemplars []exemplar.Exemplar + title string + scrapeClassicHistograms bool + enableNativeHistogramsIngestion bool + scrapeText string + contentType string + discoveryLabels []string + floats []floatSample + histograms []histogramSample + exemplars []exemplar.Exemplar }{ { title: "Metric without exemplars", @@ -1862,6 +1867,8 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 }, { title: "Native histogram with three exemplars", + + enableNativeHistogramsIngestion: true, scrapeText: `name: "test_histogram" help: "Test histogram with many buckets removed to keep it manageable in size." type: HISTOGRAM @@ -1976,6 +1983,8 @@ metric: < }, { title: "Native histogram with three exemplars scraped as classic histogram", + + enableNativeHistogramsIngestion: true, scrapeText: `name: "test_histogram" help: "Test histogram with many buckets removed to keep it manageable in size." type: HISTOGRAM @@ -2115,6 +2124,7 @@ metric: < } sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return app }, 0) + sl.enableNativeHistogramIngestion = test.enableNativeHistogramsIngestion sl.sampleMutator = func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, discoveryLabels, false, nil) } @@ -3710,7 +3720,7 @@ scrape_configs: s.DB.EnableNativeHistograms() reg := prometheus.NewRegistry() - mng, err := NewManager(nil, nil, s, reg) + mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, s, reg) require.NoError(t, err) cfg, err := config.Load(configStr, false, log.NewNopLogger()) require.NoError(t, err) diff --git a/scrape/target_test.go b/scrape/target_test.go index f91e31050a8..b43ff240638 100644 --- a/scrape/target_test.go +++ b/scrape/target_test.go @@ -21,6 +21,7 @@ import ( "net/http/httptest" "net/url" "os" + "strconv" "strings" "testing" "time" @@ -67,7 +68,7 @@ func TestTargetOffset(t *testing.T) { // Calculate offsets for 10000 different targets. 
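
As the scrape hunks above show, native histogram samples are now appended only when the new option is enabled; the scrape-loop field and the `Options` field are both feature-flagged. A hypothetical wiring sketch, mirroring the `NewManager` call in the test change above (imports and the appendable are assumptions, not part of this diff):

```go
package sketch

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
)

// newScrapeManager shows hypothetical wiring: native-histogram ingestion
// is opt-in via the new Options field.
func newScrapeManager(store storage.Appendable) (*scrape.Manager, error) {
	opts := &scrape.Options{EnableNativeHistogramsIngestion: true}
	return scrape.NewManager(opts, log.NewNopLogger(), store, prometheus.NewRegistry())
}
```
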
for i := range offsets { target := newTestTarget("example.com:80", 0, labels.FromStrings( - "label", fmt.Sprintf("%d", i), + "label", strconv.Itoa(i), )) offsets[i] = target.offset(interval, offsetSeed) } diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index a7a40c1be56..8de7af6394b 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,15 +24,16 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: install Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: Install Go + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: go-version: 1.22.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 with: - version: v1.56.2 + args: --verbose + version: v1.59.0 diff --git a/storage/errors.go b/storage/errors.go new file mode 100644 index 00000000000..eff70f678d0 --- /dev/null +++ b/storage/errors.go @@ -0,0 +1,48 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import "fmt" + +type errDuplicateSampleForTimestamp struct { + timestamp int64 + existing float64 + newValue float64 +} + +func NewDuplicateFloatErr(t int64, existing, newValue float64) error { + return errDuplicateSampleForTimestamp{ + timestamp: t, + existing: existing, + newValue: newValue, + } +} + +func (e errDuplicateSampleForTimestamp) Error() string { + if e.timestamp == 0 { + return "duplicate sample for timestamp" + } + return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue) +} + +// Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp. +func (e errDuplicateSampleForTimestamp) Is(t error) bool { + if t == ErrDuplicateSampleForTimestamp { + return true + } + if v, ok := t.(errDuplicateSampleForTimestamp); ok { + return e == v + } + return false +} diff --git a/storage/interface.go b/storage/interface.go index 347e779b561..493c2d6893b 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -37,7 +37,7 @@ var ( // ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed. ErrTooOldSample = errors.New("too old sample") // ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value. 
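
The new `storage/errors.go` above, together with the sentinel redefinition in `storage/interface.go` just below, keeps `errors.Is` compatibility while letting the error carry the conflicting values: the custom `Is` method makes every detailed duplicate-sample error compare equal to the typed sentinel. For example (behavior as defined by the code shown above):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

func main() {
	err := storage.NewDuplicateFloatErr(1000, 1, 2)
	// Existing errors.Is checks against the sentinel keep working.
	fmt.Println(errors.Is(err, storage.ErrDuplicateSampleForTimestamp)) // true
	fmt.Println(err)
	// duplicate sample for timestamp 1000; overrides not allowed: existing 1, new value 2
}
```
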
- ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") + ErrDuplicateSampleForTimestamp = errDuplicateSampleForTimestamp{} ErrOutOfOrderExemplar = errors.New("out of order exemplar") ErrDuplicateExemplar = errors.New("duplicate exemplar") ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index e2058fb54de..58520c6a5dd 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -75,7 +75,7 @@ type AzureADConfig struct { //nolint:revive // exported. // OAuth is the oauth config that is being used to authenticate. OAuth *OAuthConfig `yaml:"oauth,omitempty"` - // OAuth is the oauth config that is being used to authenticate. + // SDK is the SDK config that is being used to authenticate. SDK *SDKConfig `yaml:"sdk,omitempty"` // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina. diff --git a/storage/remote/otlptranslator/README.md b/storage/remote/otlptranslator/README.md deleted file mode 100644 index 774fac5a7f9..00000000000 --- a/storage/remote/otlptranslator/README.md +++ /dev/null @@ -1,22 +0,0 @@ -## Copying from opentelemetry/opentelemetry-collector-contrib - -This files in the `prometheus/` and `prometheusremotewrite/` are copied from the OpenTelemetry Project[^1]. - -This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus` and a cyclic dependency will be created. This is just a temporary solution and the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`. - -To update the dependency is a multi-step process: -1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib) -1. Update the VERSION in `update-copy.sh`. -1. Run `./update-copy.sh`. - -### Why copy? - -This is because the packages we copy depend on the [`prompb`](https://github.com/prometheus/prometheus/blob/main/prompb) package. While the package is relatively stable, there are still changes. For example, https://github.com/prometheus/prometheus/pull/11935 changed the types. -This means if we depend on the upstream packages directly, we will never able to make the changes like above. Hence we're copying the code for now. - -### I need to manually change these files - -When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK, please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then will run the copy -script again to keep things updated. - -[^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index a6b41d1c37b..6360aa9765e 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -1,22 +1,24 @@ -// DO NOT EDIT. COPIED AS-IS. 
SEE ../README.md - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. -package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" +package prometheus import ( "strings" "unicode" - - "go.opentelemetry.io/collector/featuregate" -) - -var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister( - "pkg.translator.prometheus.PermissiveLabelSanitization", - featuregate.StageAlpha, - featuregate.WithRegisterDescription("Controls whether to change labels starting with '_' to 'key_'."), - featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), ) // Normalizes the specified label to follow Prometheus label names standard @@ -39,7 +41,7 @@ func NormalizeLabel(label string) string { // If label starts with a number, prepend with "key_" if unicode.IsDigit(rune(label[0])) { label = "key_" + label - } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGate.IsEnabled() { + } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") { label = "key" + label } diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index a976dfb4852..4cf36671aac 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -1,15 +1,25 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
-package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" +package prometheus import ( "strings" "unicode" - "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -67,13 +77,6 @@ var perUnitMap = map[string]string{ "y": "year", } -var normalizeNameGate = featuregate.GlobalRegistry().MustRegister( - "pkg.translator.prometheus.NormalizeName", - featuregate.StageBeta, - featuregate.WithRegisterDescription("Controls whether metrics names are automatically normalized to follow Prometheus naming convention"), - featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), -) - // BuildCompliantName builds a Prometheus-compliant metric name for the specified metric // // Metric name is prefixed with specified namespace and underscore (if any). @@ -86,7 +89,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix var metricName string // Full normalization following standard Prometheus naming conventions - if addMetricSuffixes && normalizeNameGate.IsEnabled() { + if addMetricSuffixes { return normalizeName(metric, namespace) } diff --git a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go index 718a5206757..1f8bf1a6380 100644 --- a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go +++ b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go @@ -1,9 +1,20 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/unit_to_ucum.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" +package prometheus import "strings" diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 817cbaba7de..68be82e4435 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -1,29 +1,42 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/helper.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. -package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" +package prometheusremotewrite import ( "encoding/hex" "fmt" "log" "math" + "slices" "sort" "strconv" - "strings" "time" "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/timestamp" - "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) @@ -48,7 +61,7 @@ const ( ) type bucketBoundsData struct { - sig string + ts *prompb.TimeSeries bound float64 } @@ -66,94 +79,47 @@ func (a ByLabelName) Len() int { return len(a) } func (a ByLabelName) Less(i, j int) bool { return a[i].Name < a[j].Name } func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -// addSample finds a TimeSeries in tsMap that corresponds to the label set labels, and add sample to the TimeSeries; it -// creates a new TimeSeries in the map if not found and returns the time series signature. -// tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil. -func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label, - datatype string) string { - if sample == nil || labels == nil || tsMap == nil { - // This shouldn't happen - return "" - } +// timeSeriesSignature returns a hashed label set signature. +// The label slice should not contain duplicate label names; this method sorts the slice by label name before creating +// the signature. +// The algorithm is the same as in Prometheus' labels.StableHash function. +func timeSeriesSignature(labels []prompb.Label) uint64 { + sort.Sort(ByLabelName(labels)) - sig := timeSeriesSignature(datatype, labels) - ts := tsMap[sig] - if ts != nil { - ts.Samples = append(ts.Samples, *sample) - } else { - newTs := &prompb.TimeSeries{ - Labels: labels, - Samples: []prompb.Sample{*sample}, + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range labels { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. 
+ h := xxhash.New() + _, _ = h.Write(b) + for _, v := range labels[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() } - tsMap[sig] = newTs - } - - return sig -} - -// addExemplars finds a bucket bound that corresponds to the exemplars value and add the exemplar to the specific sig; -// we only add exemplars if samples are presents -// tsMap is unmodified if either of its parameters is nil and samples are nil. -func addExemplars(tsMap map[string]*prompb.TimeSeries, exemplars []prompb.Exemplar, bucketBoundsData []bucketBoundsData) { - if len(tsMap) == 0 || len(bucketBoundsData) == 0 || len(exemplars) == 0 { - return - } - sort.Sort(byBucketBoundsData(bucketBoundsData)) - - for _, exemplar := range exemplars { - addExemplar(tsMap, bucketBoundsData, exemplar) + b = append(b, v.Name...) + b = append(b, seps[0]) + b = append(b, v.Value...) + b = append(b, seps[0]) } + return xxhash.Sum64(b) } -func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBoundsData, exemplar prompb.Exemplar) { - for _, bucketBound := range bucketBounds { - sig := bucketBound.sig - bound := bucketBound.bound - - ts := tsMap[sig] - if ts != nil && len(ts.Samples) > 0 && exemplar.Value <= bound { - ts.Exemplars = append(ts.Exemplars, exemplar) - return - } - } -} - -// timeSeries return a string signature in the form of: -// -// TYPE-label1-value1- ... -labelN-valueN -// -// the label slice should not contain duplicate label names; this method sorts the slice by label name before creating -// the signature. -func timeSeriesSignature(datatype string, labels []prompb.Label) string { - length := len(datatype) - - for _, lb := range labels { - length += 2 + len(lb.GetName()) + len(lb.GetValue()) - } - - b := strings.Builder{} - b.Grow(length) - b.WriteString(datatype) - - sort.Sort(ByLabelName(labels)) - - for _, lb := range labels { - b.WriteString("-") - b.WriteString(lb.GetName()) - b.WriteString("-") - b.WriteString(lb.GetValue()) - } - - return b.String() -} +var seps = []byte{'\xff'} // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. -// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen, and overwrites are -// logged. Resulting label names are sanitized. -func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { - serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName) - instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID) +// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and +// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. 
+func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, + ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { + resourceAttrs := resource.Attributes() + serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) + instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) // Calculate the maximum possible number of labels we could return so we can preallocate l maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 @@ -171,9 +137,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. - labels := make([]prompb.Label, 0, attributes.Len()) + labels := make([]prompb.Label, 0, maxLabelCount) + // XXX: Should we always drop service namespace/service name/service instance ID from the labels + // (as they get mapped to other Prometheus labels)? attributes.Range(func(key string, value pcommon.Value) bool { - labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + if !slices.Contains(ignoreAttrs, key) { + labels = append(labels, prompb.Label{Name: key, Value: value.AsString()}) + } return true }) sort.Stable(ByLabelName(labels)) @@ -190,7 +160,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa // Map service.name + service.namespace to job if haveServiceName { val := serviceName.AsString() - if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok { + if serviceNamespace, ok := resourceAttrs.Get(conventions.AttributeServiceNamespace); ok { val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) } l[model.JobLabel] = val @@ -213,7 +183,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa break } _, found := l[extras[i]] - if found { + if found && logOnOverwrite { log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.") } // internal labels should be maintained @@ -224,12 +194,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa l[name] = extras[i+1] } - s := make([]prompb.Label, 0, len(l)) + labels = labels[:0] for k, v := range l { - s = append(s, prompb.Label{Name: k, Value: v}) + labels = append(labels, prompb.Label{Name: k, Value: v}) } - return s + return labels } // isValidAggregationTemporality checks whether an OTel metric has a valid @@ -249,100 +219,84 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { return false } -// addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. 
It -// ignore extra buckets if len(ExplicitBounds) > len(BucketCounts) -func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries, baseName string) { - timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) +func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice, + resource pcommon.Resource, settings Settings, baseName string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + timestamp := convertTimeStamp(pt.Timestamp()) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) + + // If the sum is unset, it indicates the _sum metric point should be + // omitted + if pt.HasSum() { + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: pt.Sum(), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + sum.Value = math.Float64frombits(value.StaleNaN) + } - createLabels := func(nameSuffix string, extras ...string) []prompb.Label { - extraLabelCount := len(extras) / 2 - labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name - copy(labels, baseLabels) + sumlabels := createLabels(baseName+sumStr, baseLabels) + c.addSample(sum, sumlabels) - for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ { - labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) } - // sum, count, and buckets of the histogram should append suffix to baseName - labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: baseName + nameSuffix}) - - return labels - } - - // If the sum is unset, it indicates the _sum metric point should be - // omitted - if pt.HasSum() { - // treat sum as a sample in an individual TimeSeries - sum := &prompb.Sample{ - Value: pt.Sum(), + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.Count()), Timestamp: timestamp, } if pt.Flags().NoRecordedValue() { - sum.Value = math.Float64frombits(value.StaleNaN) + count.Value = math.Float64frombits(value.StaleNaN) } - sumlabels := createLabels(sumStr) - addSample(tsMap, sum, sumlabels, metric.Type().String()) - - } - - // treat count as a sample in an individual TimeSeries - count := &prompb.Sample{ - Value: float64(pt.Count()), - Timestamp: timestamp, - } - if pt.Flags().NoRecordedValue() { - count.Value = math.Float64frombits(value.StaleNaN) - } - - countlabels := createLabels(countStr) - addSample(tsMap, count, countlabels, metric.Type().String()) + countlabels := createLabels(baseName+countStr, baseLabels) + c.addSample(count, countlabels) - // cumulative count for conversion to cumulative histogram - var cumulativeCount uint64 + // cumulative count for conversion to cumulative histogram + var cumulativeCount uint64 - promExemplars := getPromExemplars[pmetric.HistogramDataPoint](pt) + var bucketBounds []bucketBoundsData - var bucketBounds []bucketBoundsData + // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 + for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { + bound := pt.ExplicitBounds().At(i) + cumulativeCount += pt.BucketCounts().At(i) + bucket := &prompb.Sample{ + Value: float64(cumulativeCount), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + bucket.Value = 
math.Float64frombits(value.StaleNaN) + } + boundStr := strconv.FormatFloat(bound, 'f', -1, 64) + labels := createLabels(baseName+bucketStr, baseLabels, leStr, boundStr) + ts := c.addSample(bucket, labels) - // process each bound, based on histograms proto definition, # of buckets = # of explicit bounds + 1 - for i := 0; i < pt.ExplicitBounds().Len() && i < pt.BucketCounts().Len(); i++ { - bound := pt.ExplicitBounds().At(i) - cumulativeCount += pt.BucketCounts().At(i) - bucket := &prompb.Sample{ - Value: float64(cumulativeCount), + bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: bound}) + } + // add le=+Inf bucket + infBucket := &prompb.Sample{ Timestamp: timestamp, } if pt.Flags().NoRecordedValue() { - bucket.Value = math.Float64frombits(value.StaleNaN) + infBucket.Value = math.Float64frombits(value.StaleNaN) + } else { + infBucket.Value = float64(pt.Count()) } - boundStr := strconv.FormatFloat(bound, 'f', -1, 64) - labels := createLabels(bucketStr, leStr, boundStr) - sig := addSample(tsMap, bucket, labels, metric.Type().String()) - - bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) - } - // add le=+Inf bucket - infBucket := &prompb.Sample{ - Timestamp: timestamp, - } - if pt.Flags().NoRecordedValue() { - infBucket.Value = math.Float64frombits(value.StaleNaN) - } else { - infBucket.Value = float64(pt.Count()) - } - infLabels := createLabels(bucketStr, leStr, pInfStr) - sig := addSample(tsMap, infBucket, infLabels, metric.Type().String()) + infLabels := createLabels(baseName+bucketStr, baseLabels, leStr, pInfStr) + ts := c.addSample(infBucket, infLabels) - bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) - addExemplars(tsMap, promExemplars, bucketBounds) + bucketBounds = append(bucketBounds, bucketBoundsData{ts: ts, bound: math.Inf(1)}) + c.addExemplars(pt, bucketBounds) - // add _created time series if needed - startTimestamp := pt.StartTimestamp() - if settings.ExportCreatedMetric && startTimestamp != 0 { - labels := createLabels(createdSuffix) - addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, pt.Timestamp(), metric.Type().String()) + startTimestamp := pt.StartTimestamp() + if settings.ExportCreatedMetric && startTimestamp != 0 { + labels := createLabels(baseName+createdSuffix, baseLabels) + c.addTimeSeriesIfNeeded(labels, startTimestamp, pt.Timestamp()) + } } } @@ -415,162 +369,203 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() for x := 0; x < dataPoints.Len(); x++ { - ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeSum: dataPoints := metric.Sum().DataPoints() for x := 0; x < dataPoints.Len(); x++ { - ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeHistogram: dataPoints := metric.Histogram().DataPoints() for x := 0; x < dataPoints.Len(); x++ { - ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeExponentialHistogram: dataPoints := metric.ExponentialHistogram().DataPoints() for x := 0; x < dataPoints.Len(); x++ { - ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + ts = max(ts, dataPoints.At(x).Timestamp()) } case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() for x := 0; x < dataPoints.Len(); x++ { - ts = maxTimestamp(ts, 
dataPoints.At(x).Timestamp()) + ts = max(ts, dataPoints.At(x).Timestamp()) } } return ts } -func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp { - if a > b { - return a - } - return b -} +func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, + settings Settings, baseName string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + timestamp := convertTimeStamp(pt.Timestamp()) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) -// addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. -func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, - tsMap map[string]*prompb.TimeSeries, baseName string) { - timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) + // treat sum as a sample in an individual TimeSeries + sum := &prompb.Sample{ + Value: pt.Sum(), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + sum.Value = math.Float64frombits(value.StaleNaN) + } + // sum and count of the summary should append suffix to baseName + sumlabels := createLabels(baseName+sumStr, baseLabels) + c.addSample(sum, sumlabels) - createLabels := func(name string, extras ...string) []prompb.Label { - extraLabelCount := len(extras) / 2 - labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name - copy(labels, baseLabels) + // treat count as a sample in an individual TimeSeries + count := &prompb.Sample{ + Value: float64(pt.Count()), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + count.Value = math.Float64frombits(value.StaleNaN) + } + countlabels := createLabels(baseName+countStr, baseLabels) + c.addSample(count, countlabels) + + // process each percentile/quantile + for i := 0; i < pt.QuantileValues().Len(); i++ { + qt := pt.QuantileValues().At(i) + quantile := &prompb.Sample{ + Value: qt.Value(), + Timestamp: timestamp, + } + if pt.Flags().NoRecordedValue() { + quantile.Value = math.Float64frombits(value.StaleNaN) + } + percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) + qtlabels := createLabels(baseName, baseLabels, quantileStr, percentileStr) + c.addSample(quantile, qtlabels) + } - for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ { - labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + startTimestamp := pt.StartTimestamp() + if settings.ExportCreatedMetric && startTimestamp != 0 { + createdLabels := createLabels(baseName+createdSuffix, baseLabels) + c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) } + } +} - labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name}) +// createLabels returns a copy of baseLabels, adding to it the pair model.MetricNameLabel=name. +// If extras are provided, corresponding label pairs are also added to the returned slice. +// If extras is uneven length, the last (unpaired) extra will be ignored. 
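+//
+// For example (an illustrative sketch of the behavior described above), given
+// baseLabels containing {job="svc"}:
+//
+//	createLabels("m_bucket", baseLabels, "le", "0.5")
+//
+// yields {job="svc", le="0.5", __name__="m_bucket"}, since model.MetricNameLabel
+// is "__name__".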
+func createLabels(name string, baseLabels []prompb.Label, extras ...string) []prompb.Label { + extraLabelCount := len(extras) / 2 + labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name + copy(labels, baseLabels) - return labels + n := len(extras) + n -= n % 2 + for extrasIdx := 0; extrasIdx < n; extrasIdx += 2 { + labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) } - // treat sum as a sample in an individual TimeSeries - sum := &prompb.Sample{ - Value: pt.Sum(), - Timestamp: timestamp, - } - if pt.Flags().NoRecordedValue() { - sum.Value = math.Float64frombits(value.StaleNaN) - } - // sum and count of the summary should append suffix to baseName - sumlabels := createLabels(baseName + sumStr) - addSample(tsMap, sum, sumlabels, metric.Type().String()) - - // treat count as a sample in an individual TimeSeries - count := &prompb.Sample{ - Value: float64(pt.Count()), - Timestamp: timestamp, - } - if pt.Flags().NoRecordedValue() { - count.Value = math.Float64frombits(value.StaleNaN) - } - countlabels := createLabels(baseName + countStr) - addSample(tsMap, count, countlabels, metric.Type().String()) - - // process each percentile/quantile - for i := 0; i < pt.QuantileValues().Len(); i++ { - qt := pt.QuantileValues().At(i) - quantile := &prompb.Sample{ - Value: qt.Value(), - Timestamp: timestamp, + labels = append(labels, prompb.Label{Name: model.MetricNameLabel, Value: name}) + return labels +} + +// getOrCreateTimeSeries returns the time series corresponding to the label set if existent, and false. +// Otherwise it creates a new one and returns that, and true. +func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*prompb.TimeSeries, bool) { + h := timeSeriesSignature(lbls) + ts := c.unique[h] + if ts != nil { + if isSameMetric(ts, lbls) { + // We already have this metric + return ts, false } - if pt.Flags().NoRecordedValue() { - quantile.Value = math.Float64frombits(value.StaleNaN) + + // Look for a matching conflict + for _, cTS := range c.conflicts[h] { + if isSameMetric(cTS, lbls) { + // We already have this metric + return cTS, false + } } - percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) - qtlabels := createLabels(baseName, quantileStr, percentileStr) - addSample(tsMap, quantile, qtlabels, metric.Type().String()) + + // New conflict + ts = &prompb.TimeSeries{ + Labels: lbls, + } + c.conflicts[h] = append(c.conflicts[h], ts) + return ts, true } - // add _created time series if needed - startTimestamp := pt.StartTimestamp() - if settings.ExportCreatedMetric && startTimestamp != 0 { - createdLabels := createLabels(baseName + createdSuffix) - addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, pt.Timestamp(), metric.Type().String()) + // This metric is new + ts = &prompb.TimeSeries{ + Labels: lbls, } + c.unique[h] = ts + return ts, true } -// addCreatedTimeSeriesIfNeeded adds {name}_created time series with a single -// sample. If the series exists, then new samples won't be added. 
-func addCreatedTimeSeriesIfNeeded( - series map[string]*prompb.TimeSeries, - labels []prompb.Label, - startTimestamp pcommon.Timestamp, - timestamp pcommon.Timestamp, - metricType string, -) { - sig := timeSeriesSignature(metricType, labels) - if _, ok := series[sig]; !ok { - series[sig] = &prompb.TimeSeries{ - Labels: labels, - Samples: []prompb.Sample{ - { // convert ns to ms - Value: float64(convertTimeStamp(startTimestamp)), - Timestamp: convertTimeStamp(timestamp), - }, +// addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist. +// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp, +// both converted to milliseconds. +func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) { + ts, created := c.getOrCreateTimeSeries(lbls) + if created { + ts.Samples = []prompb.Sample{ + { + // convert ns to ms + Value: float64(convertTimeStamp(startTimestamp)), + Timestamp: convertTimeStamp(timestamp), }, } } } -// addResourceTargetInfo converts the resource to the target info metric -func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, tsMap map[string]*prompb.TimeSeries) { - if settings.DisableTargetInfo { +// addResourceTargetInfo converts the resource to the target info metric. +func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, converter *PrometheusConverter) { + if settings.DisableTargetInfo || timestamp == 0 { return } - // Use resource attributes (other than those used for job+instance) as the - // metric labels for the target info metric - attributes := pcommon.NewMap() - resource.Attributes().CopyTo(attributes) - attributes.RemoveIf(func(k string, _ pcommon.Value) bool { - switch k { - case conventions.AttributeServiceName, conventions.AttributeServiceNamespace, conventions.AttributeServiceInstanceID: - // Remove resource attributes used for job + instance - return true - default: - return false + + attributes := resource.Attributes() + identifyingAttrs := []string{ + conventions.AttributeServiceNamespace, + conventions.AttributeServiceName, + conventions.AttributeServiceInstanceID, + } + nonIdentifyingAttrsCount := attributes.Len() + for _, a := range identifyingAttrs { + _, haveAttr := attributes.Get(a) + if haveAttr { + nonIdentifyingAttrsCount-- } - }) - if attributes.Len() == 0 { + } + if nonIdentifyingAttrsCount == 0 { // If we only have job + instance, then target_info isn't useful, so don't add it. return } - // create parameters for addSample + name := targetMetricName if len(settings.Namespace) > 0 { name = settings.Namespace + "_" + name } - labels := createAttributes(resource, attributes, settings.ExternalLabels, model.MetricNameLabel, name) + + labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name) + haveIdentifier := false + for _, l := range labels { + if l.Name == model.JobLabel || l.Name == model.InstanceLabel { + haveIdentifier = true + break + } + } + + if !haveIdentifier { + // We need at least one identifying label to generate target_info. 
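+		// (Consumers join target_info to other series via the job and instance
+		// labels, so a target_info series lacking both would not be correlatable
+		// with anything.)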
+ return + } + sample := &prompb.Sample{ Value: float64(1), // convert ns to ms Timestamp: convertTimeStamp(timestamp), } - addSample(tsMap, sample, labels, infoType) + converter.addSample(sample, labels) } // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 14cea32c374..31d343fe4d8 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -1,58 +1,59 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/histograms.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. -package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" +package prometheusremotewrite import ( "fmt" "math" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" ) const defaultZeroThreshold = 1e-128 -func addSingleExponentialHistogramDataPoint( - metric string, - pt pmetric.ExponentialHistogramDataPoint, - resource pcommon.Resource, - settings Settings, - series map[string]*prompb.TimeSeries, -) error { - labels := createAttributes( - resource, - pt.Attributes(), - settings.ExternalLabels, - model.MetricNameLabel, - metric, - ) - - sig := timeSeriesSignature( - pmetric.MetricTypeExponentialHistogram.String(), - labels, - ) - ts, ok := series[sig] - if !ok { - ts = &prompb.TimeSeries{ - Labels: labels, +func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, + resource pcommon.Resource, settings Settings, baseName string) error { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + lbls := createAttributes( + resource, + pt.Attributes(), + settings.ExternalLabels, + nil, + true, + model.MetricNameLabel, + baseName, + ) + ts, _ := c.getOrCreateTimeSeries(lbls) + + histogram, err := exponentialToNativeHistogram(pt) + if err != nil { + return err } - series[sig] = ts - } + ts.Histograms = append(ts.Histograms, histogram) - histogram, err := exponentialToNativeHistogram(pt) - if err != nil { - return err + exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt) + ts.Exemplars = append(ts.Exemplars, exemplars...) 
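+		// The exemplars above are attached directly to the native histogram
+		// series; classic histogram buckets instead get theirs spread across
+		// the per-bucket series via addExemplars.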
} - ts.Histograms = append(ts.Histograms, histogram) - - exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt) - ts.Exemplars = append(ts.Exemplars, exemplars...) return nil } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index fb141034ad0..65dac99c50c 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -1,19 +1,31 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" +package prometheusremotewrite import ( "errors" "fmt" + "sort" - "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" + "github.com/prometheus/prometheus/prompb" prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) @@ -26,10 +38,21 @@ type Settings struct { SendMetadata bool } -// FromMetrics converts pmetric.Metrics to Prometheus remote write format. -func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*prompb.TimeSeries, errs error) { - tsMap = make(map[string]*prompb.TimeSeries) +// PrometheusConverter converts from OTel write format to Prometheus remote write format. +type PrometheusConverter struct { + unique map[uint64]*prompb.TimeSeries + conflicts map[uint64][]*prompb.TimeSeries +} + +func NewPrometheusConverter() *PrometheusConverter { + return &PrometheusConverter{ + unique: map[uint64]*prompb.TimeSeries{}, + conflicts: map[uint64][]*prompb.TimeSeries{}, + } +} +// FromMetrics converts pmetric.Metrics to Prometheus remote write format. 
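+//
+// Illustrative usage (a sketch, assuming an existing pmetric.Metrics value md):
+//
+//	converter := NewPrometheusConverter()
+//	if err := converter.FromMetrics(md, Settings{AddMetricSuffixes: true}); err != nil {
+//		// Conversion is best-effort: any series converted before the error
+//		// remain available via converter.TimeSeries().
+//	}
+//	tss := converter.TimeSeries()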
+func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (errs error) { resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) @@ -39,13 +62,12 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp // use with the "target" info metric var mostRecentTimestamp pcommon.Timestamp for j := 0; j < scopeMetricsSlice.Len(); j++ { - scopeMetrics := scopeMetricsSlice.At(j) - metricSlice := scopeMetrics.Metrics() + metricSlice := scopeMetricsSlice.At(j).Metrics() // TODO: decide if instrumentation library information should be exported as labels for k := 0; k < metricSlice.Len(); k++ { metric := metricSlice.At(k) - mostRecentTimestamp = maxTimestamp(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) + mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) if !isValidAggregationTemporality(metric) { errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name())) @@ -54,65 +76,106 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) - // handle individual metric based on type + // handle individual metrics based on type //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break } - for x := 0; x < dataPoints.Len(); x++ { - addSingleGaugeNumberDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName) - } + c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName) case pmetric.MetricTypeSum: dataPoints := metric.Sum().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break } - for x := 0; x < dataPoints.Len(); x++ { - addSingleSumNumberDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName) - } + c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName) case pmetric.MetricTypeHistogram: dataPoints := metric.Histogram().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break } - for x := 0; x < dataPoints.Len(); x++ { - addSingleHistogramDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName) - } + c.addHistogramDataPoints(dataPoints, resource, settings, promName) case pmetric.MetricTypeExponentialHistogram: dataPoints := metric.ExponentialHistogram().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) + break } - for x := 0; x < dataPoints.Len(); x++ { - errs = multierr.Append( - errs, - addSingleExponentialHistogramDataPoint( - promName, - dataPoints.At(x), - resource, - settings, - tsMap, - ), - ) - } + errs = multierr.Append(errs, c.addExponentialHistogramDataPoints( + dataPoints, + resource, + settings, + promName, + )) case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name()))
+					break
				}
-				for x := 0; x < dataPoints.Len(); x++ {
-					addSingleSummaryDataPoint(dataPoints.At(x), resource, metric, settings, tsMap, promName)
-				}
+				c.addSummaryDataPoints(dataPoints, resource, settings, promName)
			default:
				errs = multierr.Append(errs, errors.New("unsupported metric type"))
			}
		}
	}
-		addResourceTargetInfo(resource, settings, mostRecentTimestamp, tsMap)
+		addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
	}

	return
}
+
+func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
+	if len(ts.Labels) != len(lbls) {
+		return false
+	}
+	for i, l := range ts.Labels {
+		if l.Name != lbls[i].Name || l.Value != lbls[i].Value {
+			return false
+		}
+	}
+	return true
+}
+
+// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound corresponding to its value,
+// the exemplar is added to the bucket bound's time series, provided that the time series has samples.
+func (c *PrometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) {
+	if len(bucketBounds) == 0 {
+		return
+	}
+
+	exemplars := getPromExemplars(dataPoint)
+	if len(exemplars) == 0 {
+		return
+	}
+
+	sort.Sort(byBucketBoundsData(bucketBounds))
+	for _, exemplar := range exemplars {
+		for _, bound := range bucketBounds {
+			if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound {
+				bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar)
+				break
+			}
+		}
+	}
+}
+
+// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it.
+// If there is no corresponding TimeSeries already, it's created.
+// The corresponding TimeSeries is returned.
+// If either lbls is nil/empty or sample is nil, nothing is done.
+func (c *PrometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries {
+	if sample == nil || len(lbls) == 0 {
+		// This shouldn't happen
+		return nil
+	}
+
+	ts, _ := c.getOrCreateTimeSeries(lbls)
+	ts.Samples = append(ts.Samples, *sample)
+	return ts
+}
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
new file mode 100644
index 00000000000..37ac6777473
--- /dev/null
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go
@@ -0,0 +1,134 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw_test.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
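+
+// The benchmark below sweeps PrometheusConverter.FromMetrics over varying counts
+// of resource attributes, histograms, per-series labels and exemplars.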
+ +package prometheusremotewrite + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" +) + +func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { + for _, resourceAttributeCount := range []int{0, 5, 50} { + b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) { + for _, histogramCount := range []int{0, 1000} { + b.Run(fmt.Sprintf("histogram count: %v", histogramCount), func(b *testing.B) { + nonHistogramCounts := []int{0, 1000} + + if resourceAttributeCount == 0 && histogramCount == 0 { + // Don't bother running a scenario where we'll generate no series. + nonHistogramCounts = []int{1000} + } + + for _, nonHistogramCount := range nonHistogramCounts { + b.Run(fmt.Sprintf("non-histogram count: %v", nonHistogramCount), func(b *testing.B) { + for _, labelsPerMetric := range []int{2, 20} { + b.Run(fmt.Sprintf("labels per metric: %v", labelsPerMetric), func(b *testing.B) { + for _, exemplarsPerSeries := range []int{0, 5, 10} { + b.Run(fmt.Sprintf("exemplars per series: %v", exemplarsPerSeries), func(b *testing.B) { + payload := createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries) + + for i := 0; i < b.N; i++ { + converter := NewPrometheusConverter() + require.NoError(b, converter.FromMetrics(payload.Metrics(), Settings{})) + require.NotNil(b, converter.TimeSeries()) + } + }) + } + }) + } + }) + } + }) + } + }) + } +} + +func createExportRequest(resourceAttributeCount, histogramCount, nonHistogramCount, labelsPerMetric, exemplarsPerSeries int) pmetricotlp.ExportRequest { + request := pmetricotlp.NewExportRequest() + + rm := request.Metrics().ResourceMetrics().AppendEmpty() + generateAttributes(rm.Resource().Attributes(), "resource", resourceAttributeCount) + + metrics := rm.ScopeMetrics().AppendEmpty().Metrics() + ts := pcommon.NewTimestampFromTime(time.Now()) + + for i := 1; i <= histogramCount; i++ { + m := metrics.AppendEmpty() + m.SetEmptyHistogram() + m.SetName(fmt.Sprintf("histogram-%v", i)) + m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + h := m.Histogram().DataPoints().AppendEmpty() + h.SetTimestamp(ts) + + // Set 50 samples, 10 each with values 0.5, 1, 2, 4, and 8 + h.SetCount(50) + h.SetSum(155) + h.BucketCounts().FromRaw([]uint64{10, 10, 10, 10, 10, 0}) + h.ExplicitBounds().FromRaw([]float64{.5, 1, 2, 4, 8, 16}) // Bucket boundaries include the upper limit (ie. 
each sample is on the upper limit of its bucket) + + generateAttributes(h.Attributes(), "series", labelsPerMetric) + generateExemplars(h.Exemplars(), exemplarsPerSeries, ts) + } + + for i := 1; i <= nonHistogramCount; i++ { + m := metrics.AppendEmpty() + m.SetEmptySum() + m.SetName(fmt.Sprintf("sum-%v", i)) + m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + point := m.Sum().DataPoints().AppendEmpty() + point.SetTimestamp(ts) + point.SetDoubleValue(1.23) + generateAttributes(point.Attributes(), "series", labelsPerMetric) + generateExemplars(point.Exemplars(), exemplarsPerSeries, ts) + } + + for i := 1; i <= nonHistogramCount; i++ { + m := metrics.AppendEmpty() + m.SetEmptyGauge() + m.SetName(fmt.Sprintf("gauge-%v", i)) + point := m.Gauge().DataPoints().AppendEmpty() + point.SetTimestamp(ts) + point.SetDoubleValue(1.23) + generateAttributes(point.Attributes(), "series", labelsPerMetric) + generateExemplars(point.Exemplars(), exemplarsPerSeries, ts) + } + + return request +} + +func generateAttributes(m pcommon.Map, prefix string, count int) { + for i := 1; i <= count; i++ { + m.PutStr(fmt.Sprintf("%v-name-%v", prefix, i), fmt.Sprintf("value-%v", i)) + } +} + +func generateExemplars(exemplars pmetric.ExemplarSlice, count int, ts pcommon.Timestamp) { + for i := 1; i <= count; i++ { + e := exemplars.AppendEmpty() + e.SetTimestamp(ts) + e.SetDoubleValue(2.22) + e.SetSpanID(pcommon.SpanID{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}) + e.SetTraceID(pcommon.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}) + } +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index b5bd8765fe8..aafebc6c46a 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -1,106 +1,110 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/number_data_points.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
-// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" +package prometheusremotewrite import ( "math" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/value" - "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/prompb" ) -// addSingleGaugeNumberDataPoint converts the Gauge metric data point to a -// Prometheus time series with samples and labels. The result is stored in the -// series map. -func addSingleGaugeNumberDataPoint( - pt pmetric.NumberDataPoint, - resource pcommon.Resource, - metric pmetric.Metric, - settings Settings, - series map[string]*prompb.TimeSeries, - name string, -) { - labels := createAttributes( - resource, - pt.Attributes(), - settings.ExternalLabels, - model.MetricNameLabel, - name, - ) - sample := &prompb.Sample{ - // convert ns to ms - Timestamp: convertTimeStamp(pt.Timestamp()), - } - switch pt.ValueType() { - case pmetric.NumberDataPointValueTypeInt: - sample.Value = float64(pt.IntValue()) - case pmetric.NumberDataPointValueTypeDouble: - sample.Value = pt.DoubleValue() - } - if pt.Flags().NoRecordedValue() { - sample.Value = math.Float64frombits(value.StaleNaN) +func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, settings Settings, name string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + labels := createAttributes( + resource, + pt.Attributes(), + settings.ExternalLabels, + nil, + true, + model.MetricNameLabel, + name, + ) + sample := &prompb.Sample{ + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), + } + switch pt.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + sample.Value = float64(pt.IntValue()) + case pmetric.NumberDataPointValueTypeDouble: + sample.Value = pt.DoubleValue() + } + if pt.Flags().NoRecordedValue() { + sample.Value = math.Float64frombits(value.StaleNaN) + } + c.addSample(sample, labels) } - addSample(series, sample, labels, metric.Type().String()) } -// addSingleSumNumberDataPoint converts the Sum metric data point to a Prometheus -// time series with samples, labels and exemplars. The result is stored in the -// series map. -func addSingleSumNumberDataPoint( - pt pmetric.NumberDataPoint, - resource pcommon.Resource, - metric pmetric.Metric, - settings Settings, - series map[string]*prompb.TimeSeries, - name string, -) { - labels := createAttributes( - resource, - pt.Attributes(), - settings.ExternalLabels, - model.MetricNameLabel, name, - ) - sample := &prompb.Sample{ - // convert ns to ms - Timestamp: convertTimeStamp(pt.Timestamp()), - } - switch pt.ValueType() { - case pmetric.NumberDataPointValueTypeInt: - sample.Value = float64(pt.IntValue()) - case pmetric.NumberDataPointValueTypeDouble: - sample.Value = pt.DoubleValue() - } - if pt.Flags().NoRecordedValue() { - sample.Value = math.Float64frombits(value.StaleNaN) - } - sig := addSample(series, sample, labels, metric.Type().String()) - - if ts := series[sig]; sig != "" && ts != nil { - exemplars := getPromExemplars[pmetric.NumberDataPoint](pt) - ts.Exemplars = append(ts.Exemplars, exemplars...) 
- } - - // add _created time series if needed - if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() { - startTimestamp := pt.StartTimestamp() - if startTimestamp == 0 { - return +func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) { + for x := 0; x < dataPoints.Len(); x++ { + pt := dataPoints.At(x) + lbls := createAttributes( + resource, + pt.Attributes(), + settings.ExternalLabels, + nil, + true, + model.MetricNameLabel, + name, + ) + sample := &prompb.Sample{ + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), } + switch pt.ValueType() { + case pmetric.NumberDataPointValueTypeInt: + sample.Value = float64(pt.IntValue()) + case pmetric.NumberDataPointValueTypeDouble: + sample.Value = pt.DoubleValue() + } + if pt.Flags().NoRecordedValue() { + sample.Value = math.Float64frombits(value.StaleNaN) + } + ts := c.addSample(sample, lbls) + if ts != nil { + exemplars := getPromExemplars[pmetric.NumberDataPoint](pt) + ts.Exemplars = append(ts.Exemplars, exemplars...) + } + + // add created time series if needed + if settings.ExportCreatedMetric && metric.Sum().IsMonotonic() { + startTimestamp := pt.StartTimestamp() + if startTimestamp == 0 { + return + } - createdLabels := make([]prompb.Label, len(labels)) - copy(createdLabels, labels) - for i, l := range createdLabels { - if l.Name == model.MetricNameLabel { - createdLabels[i].Value = name + createdSuffix - break + createdLabels := make([]prompb.Label, len(lbls)) + copy(createdLabels, lbls) + for i, l := range createdLabels { + if l.Name == model.MetricNameLabel { + createdLabels[i].Value = name + createdSuffix + break + } } + c.addTimeSeriesIfNeeded(createdLabels, startTimestamp, pt.Timestamp()) } - addCreatedTimeSeriesIfNeeded(series, createdLabels, startTimestamp, pt.Timestamp(), metric.Type().String()) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go index e43797212e3..ba487041930 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/otlp_to_openmetrics_metadata.go @@ -1,14 +1,25 @@ -// DO NOT EDIT. COPIED AS-IS. SEE ../README.md +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
-// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package prometheusremotewrite // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" +package prometheusremotewrite import ( - "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/prometheus/prometheus/prompb" prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go b/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go new file mode 100644 index 00000000000..fe973761a25 --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/timeseries.go @@ -0,0 +1,41 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: +// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. + +package prometheusremotewrite + +import ( + "github.com/prometheus/prometheus/prompb" +) + +// TimeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format. +func (c *PrometheusConverter) TimeSeries() []prompb.TimeSeries { + conflicts := 0 + for _, ts := range c.conflicts { + conflicts += len(ts) + } + allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts) + for _, ts := range c.unique { + allTS = append(allTS, *ts) + } + for _, cTS := range c.conflicts { + for _, ts := range cTS { + allTS = append(allTS, *ts) + } + } + + return allTS +} diff --git a/storage/remote/otlptranslator/update-copy.sh b/storage/remote/otlptranslator/update-copy.sh deleted file mode 100755 index 8aa645e0bd7..00000000000 --- a/storage/remote/otlptranslator/update-copy.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -set -xe - -OTEL_VERSION=v0.95.0 - -git clone https://github.com/open-telemetry/opentelemetry-collector-contrib ./tmp -cd ./tmp -git checkout $OTEL_VERSION -cd .. - -rm -rf ./prometheusremotewrite/* -cp -r ./tmp/pkg/translator/prometheusremotewrite/*.go ./prometheusremotewrite -rm -rf ./prometheusremotewrite/*_test.go - -rm -rf ./prometheus/* -cp -r ./tmp/pkg/translator/prometheus/*.go ./prometheus -rm -rf ./prometheus/*_test.go - -rm -rf ./tmp - -case $(sed --help 2>&1) in - *GNU*) set sed -i;; - *) set sed -i '';; -esac - -"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go ./prometheus/*.go -"$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. 
SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index e32a3ace051..6121fb6c033 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -468,7 +468,7 @@ func TestReleaseNoninternedString(t *testing.T) { m.StoreSeries([]record.RefSeries{ { Ref: chunks.HeadSeriesRef(i), - Labels: labels.FromStrings("asdf", fmt.Sprintf("%d", i)), + Labels: labels.FromStrings("asdf", strconv.Itoa(i)), }, }, 0) m.SeriesReset(1) diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index e8e0ecb8df4..452b2922109 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -30,14 +30,14 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" - "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" ) func TestSampledReadEndpoint(t *testing.T) { - store := promql.LoadedStorage(t, ` + store := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar",baz="qux"} 1 `) @@ -132,7 +132,7 @@ func TestSampledReadEndpoint(t *testing.T) { } func BenchmarkStreamReadEndpoint(b *testing.B) { - store := promql.LoadedStorage(b, ` + store := promqltest.LoadedStorage(b, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 test_metric1{foo="bar2",baz="qux"} 0+100x120 @@ -200,7 +200,7 @@ func TestStreamReadEndpoint(t *testing.T) { // Second with 121 float samples, We expect 1 frame with 2 chunks. // Third with 241 float samples. We expect 1 frame with 2 chunks, and 1 frame with 1 chunk for the same series due to bytes limit. // Fourth with 25 histogram samples. We expect 1 frame with 1 chunk. 
- store := promql.LoadedStorage(t, ` + store := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar1",baz="qux"} 0+100x119 test_metric1{foo="bar2",baz="qux"} 0+100x120 diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index bb6b8423a28..ff227292b8a 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -208,21 +208,15 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{ + converter := otlptranslator.NewPrometheusConverter() + if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{ AddMetricSuffixes: true, - }) - if errs != nil { - level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", errs) - } - - prwMetrics := make([]prompb.TimeSeries, 0, len(prwMetricsMap)) - - for _, ts := range prwMetricsMap { - prwMetrics = append(prwMetrics, *ts) + }); err != nil { + level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) } err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ - Timeseries: prwMetrics, + Timeseries: converter.TimeSeries(), }) switch { diff --git a/storage/series.go b/storage/series.go index eba11b4d9bc..70e3d0a1990 100644 --- a/storage/series.go +++ b/storage/series.go @@ -55,8 +55,8 @@ func NewListSeries(lset labels.Labels, s []chunks.Sample) *SeriesEntry { } } -// NewListChunkSeriesFromSamples returns chunk series entry that allows to iterate over provided samples. -// NOTE: It uses inefficient chunks encoding implementation, not caring about chunk size. +// NewListChunkSeriesFromSamples returns a chunk series entry that allows to iterate over provided samples. +// NOTE: It uses an inefficient chunks encoding implementation, not caring about chunk size. // Use only for testing. func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]chunks.Sample) *ChunkSeriesEntry { chksFromSamples := make([]chunks.Meta, 0, len(samples)) diff --git a/tracing/tracing.go b/tracing/tracing.go index 6a191373703..3b1c61532b4 100644 --- a/tracing/tracing.go +++ b/tracing/tracing.go @@ -30,7 +30,7 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" "google.golang.org/grpc/credentials" diff --git a/tsdb/agent/series_test.go b/tsdb/agent/series_test.go index ae327d8582c..bc5a4af5d33 100644 --- a/tsdb/agent/series_test.go +++ b/tsdb/agent/series_test.go @@ -14,8 +14,8 @@ package agent import ( - "fmt" "math" + "strconv" "sync" "testing" "time" @@ -53,7 +53,7 @@ func TestNoDeadlock(t *testing.T) { series := &memSeries{ ref: chunks.HeadSeriesRef(i), lset: labels.FromMap(map[string]string{ - "id": fmt.Sprintf("%d", i), + "id": strconv.Itoa(i), }), } stripeSeries.Set(series.lset.Hash(), series) diff --git a/tsdb/block.go b/tsdb/block.go index abd223e4ad5..83b86a58d16 100644 --- a/tsdb/block.go +++ b/tsdb/block.go @@ -77,6 +77,10 @@ type IndexReader interface { // during background garbage collections. Postings(ctx context.Context, name string, values ...string) (index.Postings, error) + // PostingsForLabelMatching returns a sorted iterator over postings having a label with the given name and a value for which match returns true. 
+ // If no postings are found having at least one matching label, an empty iterator is returned. + PostingsForLabelMatching(ctx context.Context, name string, match func(value string) bool) index.Postings + // SortedPostings returns a postings list that is reordered to be sorted // by the label set of the underlying series. SortedPostings(index.Postings) index.Postings @@ -518,6 +522,10 @@ func (r blockIndexReader) Postings(ctx context.Context, name string, values ...s return p, nil } +func (r blockIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings { + return r.ir.PostingsForLabelMatching(ctx, name, match) +} + func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { return r.ir.SortedPostings(p) } diff --git a/tsdb/block_test.go b/tsdb/block_test.go index 6d15d1838b4..42acc3c6930 100644 --- a/tsdb/block_test.go +++ b/tsdb/block_test.go @@ -36,6 +36,7 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/wlog" ) @@ -509,6 +510,86 @@ func TestLabelNamesWithMatchers(t *testing.T) { } } +func TestBlockIndexReader_PostingsForLabelMatching(t *testing.T) { + testPostingsForLabelMatching(t, 2, func(t *testing.T, series []labels.Labels) IndexReader { + var seriesEntries []storage.Series + for _, s := range series { + seriesEntries = append(seriesEntries, storage.NewListSeries(s, []chunks.Sample{sample{100, 0, nil, nil}})) + } + + blockDir := createBlock(t, t.TempDir(), seriesEntries) + files, err := sequenceFiles(chunkDir(blockDir)) + require.NoError(t, err) + require.NotEmpty(t, files, "No chunk created.") + + block, err := OpenBlock(nil, blockDir, nil) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, block.Close()) }) + + ir, err := block.Index() + require.NoError(t, err) + return ir + }) +} + +func testPostingsForLabelMatching(t *testing.T, offset storage.SeriesRef, setUp func(*testing.T, []labels.Labels) IndexReader) { + t.Helper() + + ctx := context.Background() + series := []labels.Labels{ + labels.FromStrings("n", "1"), + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + labels.FromStrings("n", "2"), + labels.FromStrings("n", "2.5"), + } + ir := setUp(t, series) + t.Cleanup(func() { + require.NoError(t, ir.Close()) + }) + + testCases := []struct { + name string + labelName string + match func(string) bool + exp []storage.SeriesRef + }{ + { + name: "n=1", + labelName: "n", + match: func(val string) bool { + return val == "1" + }, + exp: []storage.SeriesRef{offset + 1, offset + 2, offset + 3}, + }, + { + name: "n=2", + labelName: "n", + match: func(val string) bool { + return val == "2" + }, + exp: []storage.SeriesRef{offset + 4}, + }, + { + name: "missing label", + labelName: "missing", + match: func(val string) bool { + return true + }, + exp: nil, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p := ir.PostingsForLabelMatching(ctx, tc.labelName, tc.match) + require.NotNil(t, p) + srs, err := index.ExpandPostings(p) + require.NoError(t, err) + require.Equal(t, tc.exp, srs) + }) + } +} + // createBlock creates a block with given set of series and returns its dir. 
func createBlock(tb testing.TB, dir string, series []storage.Series) string { blockDir, err := CreateBlock(series, dir, 0, log.NewNopLogger()) diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go index 73bc5f1e35b..32346d69d00 100644 --- a/tsdb/blockwriter.go +++ b/tsdb/blockwriter.go @@ -42,7 +42,7 @@ type BlockWriter struct { // ErrNoSeriesAppended is returned if the series count is zero while flushing blocks. var ErrNoSeriesAppended = errors.New("no series appended, aborting") -// NewBlockWriter create a new block writer. +// NewBlockWriter creates a new block writer. // // The returned writer accumulates all the series in the Head block until `Flush` is called. // diff --git a/tsdb/chunkenc/bstream.go b/tsdb/chunkenc/bstream.go index 7b17f4686b3..8cc59f3ea76 100644 --- a/tsdb/chunkenc/bstream.go +++ b/tsdb/chunkenc/bstream.go @@ -52,6 +52,12 @@ type bstream struct { count uint8 // How many right-most bits are available for writing in the current byte (the last byte of the stream). } +// Reset resets b around stream. +func (b *bstream) Reset(stream []byte) { + b.stream = stream + b.count = 0 +} + func (b *bstream) bytes() []byte { return b.stream } diff --git a/tsdb/chunkenc/bstream_test.go b/tsdb/chunkenc/bstream_test.go index 66a54bc8e9e..8ac45ef0b67 100644 --- a/tsdb/chunkenc/bstream_test.go +++ b/tsdb/chunkenc/bstream_test.go @@ -19,6 +19,19 @@ import ( "github.com/stretchr/testify/require" ) +func TestBstream_Reset(t *testing.T) { + bs := bstream{ + stream: []byte("test"), + count: 10, + } + bs.Reset([]byte("was reset")) + + require.Equal(t, bstream{ + stream: []byte("was reset"), + count: 0, + }, bs) +} + func TestBstreamReader(t *testing.T) { // Write to the bit stream. w := bstream{} diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go index 21c41257b59..1421f3b3986 100644 --- a/tsdb/chunkenc/chunk.go +++ b/tsdb/chunkenc/chunk.go @@ -87,6 +87,9 @@ type Chunk interface { // There's no strong guarantee that no samples will be appended once // Compact() is called. Implementing this function is optional. Compact() + + // Reset resets the chunk given stream. + Reset(stream []byte) } type Iterable interface { @@ -303,64 +306,47 @@ func NewPool() Pool { } func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { + var c Chunk switch e { case EncXOR: - c := p.xor.Get().(*XORChunk) - c.b.stream = b - c.b.count = 0 - return c, nil + c = p.xor.Get().(*XORChunk) case EncHistogram: - c := p.histogram.Get().(*HistogramChunk) - c.b.stream = b - c.b.count = 0 - return c, nil + c = p.histogram.Get().(*HistogramChunk) case EncFloatHistogram: - c := p.floatHistogram.Get().(*FloatHistogramChunk) - c.b.stream = b - c.b.count = 0 - return c, nil + c = p.floatHistogram.Get().(*FloatHistogramChunk) + default: + return nil, fmt.Errorf("invalid chunk encoding %q", e) } - return nil, fmt.Errorf("invalid chunk encoding %q", e) + + c.Reset(b) + return c, nil } func (p *pool) Put(c Chunk) error { + var sp *sync.Pool + var ok bool switch c.Encoding() { case EncXOR: - xc, ok := c.(*XORChunk) - // This may happen often with wrapped chunks. Nothing we can really do about - // it but returning an error would cause a lot of allocations again. Thus, - // we just skip it. - if !ok { - return nil - } - xc.b.stream = nil - xc.b.count = 0 - p.xor.Put(c) + _, ok = c.(*XORChunk) + sp = &p.xor case EncHistogram: - sh, ok := c.(*HistogramChunk) - // This may happen often with wrapped chunks. Nothing we can really do about - // it but returning an error would cause a lot of allocations again. 
Thus, - // we just skip it. - if !ok { - return nil - } - sh.b.stream = nil - sh.b.count = 0 - p.histogram.Put(c) + _, ok = c.(*HistogramChunk) + sp = &p.histogram case EncFloatHistogram: - sh, ok := c.(*FloatHistogramChunk) + _, ok = c.(*FloatHistogramChunk) + sp = &p.floatHistogram + default: + return fmt.Errorf("invalid chunk encoding %q", c.Encoding()) + } + if !ok { // This may happen often with wrapped chunks. Nothing we can really do about // it but returning an error would cause a lot of allocations again. Thus, // we just skip it. - if !ok { - return nil - } - sh.b.stream = nil - sh.b.count = 0 - p.floatHistogram.Put(c) - default: - return fmt.Errorf("invalid chunk encoding %q", c.Encoding()) + return nil } + + c.Reset(nil) + sp.Put(c) return nil } diff --git a/tsdb/chunkenc/chunk_test.go b/tsdb/chunkenc/chunk_test.go index 9db1bf364fe..b72492a08b3 100644 --- a/tsdb/chunkenc/chunk_test.go +++ b/tsdb/chunkenc/chunk_test.go @@ -110,6 +110,96 @@ func testChunk(t *testing.T, c Chunk) { require.Equal(t, ValNone, it3.Seek(exp[len(exp)-1].t+1)) } +func TestPool(t *testing.T) { + p := NewPool() + for _, tc := range []struct { + name string + encoding Encoding + expErr error + }{ + { + name: "xor", + encoding: EncXOR, + }, + { + name: "histogram", + encoding: EncHistogram, + }, + { + name: "float histogram", + encoding: EncFloatHistogram, + }, + { + name: "invalid encoding", + encoding: EncNone, + expErr: fmt.Errorf(`invalid chunk encoding "none"`), + }, + } { + t.Run(tc.name, func(t *testing.T) { + c, err := p.Get(tc.encoding, []byte("test")) + if tc.expErr != nil { + require.EqualError(t, err, tc.expErr.Error()) + return + } + + require.NoError(t, err) + + var b *bstream + switch tc.encoding { + case EncHistogram: + b = &c.(*HistogramChunk).b + case EncFloatHistogram: + b = &c.(*FloatHistogramChunk).b + default: + b = &c.(*XORChunk).b + } + + require.Equal(t, &bstream{ + stream: []byte("test"), + count: 0, + }, b) + + b.count = 1 + require.NoError(t, p.Put(c)) + require.Equal(t, &bstream{ + stream: nil, + count: 0, + }, b) + }) + } + + t.Run("put bad chunk wrapper", func(t *testing.T) { + // When a wrapping chunk poses as an encoding it can't be converted to, Put should skip it. + c := fakeChunk{ + encoding: EncXOR, + t: t, + } + require.NoError(t, p.Put(c)) + }) + t.Run("put invalid encoding", func(t *testing.T) { + c := fakeChunk{ + encoding: EncNone, + t: t, + } + require.EqualError(t, p.Put(c), `invalid chunk encoding "none"`) + }) +} + +type fakeChunk struct { + Chunk + + encoding Encoding + t *testing.T +} + +func (c fakeChunk) Encoding() Encoding { + return c.encoding +} + +func (c fakeChunk) Reset([]byte) { + c.t.Fatal("Reset should not be called") +} + func benchmarkIterator(b *testing.B, newChunk func() Chunk) { const samplesPerChunk = 250 var ( diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go index 88d189254f7..1eed46ca87f 100644 --- a/tsdb/chunkenc/float_histogram.go +++ b/tsdb/chunkenc/float_histogram.go @@ -44,6 +44,10 @@ func NewFloatHistogramChunk() *FloatHistogramChunk { return &FloatHistogramChunk{b: bstream{stream: b, count: 0}} } +func (c *FloatHistogramChunk) Reset(stream []byte) { + c.b.Reset(stream) +} + // xorValue holds all the necessary information to encode // and decode XOR encoded float64 values. 
type xorValue struct { diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go index cb09eda26d4..e12aec4dcd4 100644 --- a/tsdb/chunkenc/histogram.go +++ b/tsdb/chunkenc/histogram.go @@ -45,6 +45,10 @@ func NewHistogramChunk() *HistogramChunk { return &HistogramChunk{b: bstream{stream: b, count: 0}} } +func (c *HistogramChunk) Reset(stream []byte) { + c.b.Reset(stream) +} + // Encoding returns the encoding type. func (c *HistogramChunk) Encoding() Encoding { return EncHistogram diff --git a/tsdb/chunkenc/varbit.go b/tsdb/chunkenc/varbit.go index b43574dcb63..574edec48b3 100644 --- a/tsdb/chunkenc/varbit.go +++ b/tsdb/chunkenc/varbit.go @@ -61,7 +61,7 @@ func putVarbitInt(b *bstream, val int64) { } } -// readVarbitInt reads an int64 encoced with putVarbitInt. +// readVarbitInt reads an int64 encoded with putVarbitInt. func readVarbitInt(b *bstreamReader) (int64, error) { var d byte for i := 0; i < 8; i++ { @@ -166,7 +166,7 @@ func putVarbitUint(b *bstream, val uint64) { } } -// readVarbitUint reads a uint64 encoced with putVarbitUint. +// readVarbitUint reads a uint64 encoded with putVarbitUint. func readVarbitUint(b *bstreamReader) (uint64, error) { var d byte for i := 0; i < 8; i++ { diff --git a/tsdb/chunkenc/xor.go b/tsdb/chunkenc/xor.go index 07b92383157..9430de39648 100644 --- a/tsdb/chunkenc/xor.go +++ b/tsdb/chunkenc/xor.go @@ -66,6 +66,10 @@ func NewXORChunk() *XORChunk { return &XORChunk{b: bstream{stream: b, count: 0}} } +func (c *XORChunk) Reset(stream []byte) { + c.b.Reset(stream) +} + // Encoding returns the encoding type. func (c *XORChunk) Encoding() Encoding { return EncXOR @@ -171,7 +175,6 @@ func (a *xorAppender) Append(t int64, v float64) { } a.writeVDelta(v) - default: tDelta = uint64(t - a.t) dod := int64(tDelta - a.tDelta) diff --git a/tsdb/chunks/chunks.go b/tsdb/chunks/chunks.go index 0826f69670a..e7df0eeed29 100644 --- a/tsdb/chunks/chunks.go +++ b/tsdb/chunks/chunks.go @@ -233,7 +233,7 @@ func ChunkMetasToSamples(chunks []Meta) (result []Sample) { // Iterator iterates over the chunks of a single time series. type Iterator interface { // At returns the current meta. - // It depends on implementation if the chunk is populated or not. + // It depends on the implementation whether the chunk is populated or not. At() Meta // Next advances the iterator by one. Next() bool @@ -478,7 +478,7 @@ func (w *Writer) WriteChunks(chks ...Meta) error { // the batch is too large to fit in the current segment. cutNewBatch := (i != 0) && (batchSize+SegmentHeaderSize > w.segmentSize) - // When the segment already has some data than + // If the segment already has some data then // the first batch size calculation should account for that. if firstBatch && w.n > SegmentHeaderSize { cutNewBatch = batchSize+w.n > w.segmentSize @@ -717,7 +717,7 @@ func nextSequenceFile(dir string) (string, int, error) { } // It is not necessary that we find the files in number order, // for example with '1000000' and '200000', '1000000' would come first. - // Though this is a very very race case, we check anyway for the max id. + // Though this is a very very rare case, we check anyway for the max id. if j > i { i = j } diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index 087f25fbb38..6c8707c57b9 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -188,8 +188,8 @@ func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 { return bytes } -// ChunkDiskMapper is for writing the Head block chunks to the disk -// and access chunks via mmapped file. 
+// ChunkDiskMapper is for writing the Head block chunks to disk +// and accessing chunks via mmapped files. type ChunkDiskMapper struct { /// Writer. dir *os.File @@ -231,7 +231,7 @@ type ChunkDiskMapper struct { closed bool } -// mmappedChunkFile provides mmapp access to an entire head chunks file that holds many chunks. +// mmappedChunkFile provides mmap access to an entire head chunks file that holds many chunks. type mmappedChunkFile struct { byteSlice ByteSlice maxt int64 // Max timestamp among all of this file's chunks. @@ -240,7 +240,7 @@ type mmappedChunkFile struct { // NewChunkDiskMapper returns a new ChunkDiskMapper against the given directory // using the default head chunk file duration. // NOTE: 'IterateAllChunks' method needs to be called at least once after creating ChunkDiskMapper -// to set the maxt of all the file. +// to set the maxt of all files. func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Pool, writeBufferSize, writeQueueSize int) (*ChunkDiskMapper, error) { // Validate write buffer size. if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize { @@ -381,6 +381,33 @@ func listChunkFiles(dir string) (map[int]string, error) { return res, nil } +// HardLinkChunkFiles creates hardlinks for chunk files from src to dst. +// It does nothing if src doesn't exist and ensures dst is created if not. +func HardLinkChunkFiles(src, dst string) error { + _, err := os.Stat(src) + if os.IsNotExist(err) { + return nil + } + if err != nil { + return fmt.Errorf("check source chunks dir: %w", err) + } + if err := os.MkdirAll(dst, 0o777); err != nil { + return fmt.Errorf("set up destination chunks dir: %w", err) + } + files, err := listChunkFiles(src) + if err != nil { + return fmt.Errorf("list chunks: %w", err) + } + for _, filePath := range files { + _, fileName := filepath.Split(filePath) + err := os.Link(filepath.Join(src, fileName), filepath.Join(dst, fileName)) + if err != nil { + return fmt.Errorf("hardlink a chunk: %w", err) + } + } + return nil +} + // repairLastChunkFile deletes the last file if it's empty. // Because we don't fsync when creating these files, we could end // up with an empty file at the end during an abrupt shutdown. @@ -425,7 +452,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro return files, nil } -// WriteChunk writes the chunk to the disk. +// WriteChunk writes the chunk to disk. // The returned chunk ref is the reference from where the chunk encoding starts for the chunk. func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, isOOO bool, callback func(err error)) (chkRef ChunkDiskMapperRef) { // cdm.evtlPosMtx must be held to serialize the calls to cdm.evtlPos.getNextChunkRef() and the writing of the chunk (either with or without queue). @@ -784,7 +811,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error // IterateAllChunks iterates all mmappedChunkFiles (in order of head chunk file name/number) and all the chunks within it // and runs the provided function with information about each chunk. It returns on the first error encountered. // NOTE: This method needs to be called at least once after creating ChunkDiskMapper -// to set the maxt of all the file. 
func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding, isOOO bool) error) (err error) { cdm.writePathMtx.Lock() defer cdm.writePathMtx.Unlock() @@ -904,7 +931,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return nil } -// Truncate deletes the head chunk files whose file number is less than given fileNo. +// Truncate deletes the head chunk files with numbers less than the given fileNo. func (cdm *ChunkDiskMapper) Truncate(fileNo uint32) error { cdm.readPathMtx.RLock() diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go index 5756e45856d..9f761a5f3b9 100644 --- a/tsdb/chunks/queue_test.go +++ b/tsdb/chunks/queue_test.go @@ -55,7 +55,7 @@ func (q *writeJobQueue) assertInvariants(t *testing.T) { require.Len(t, s.segment, s.nextWrite) } // Last segment must have at least one element, or we wouldn't have created it. - require.Greater(t, s.nextWrite, 0) + require.Positive(t, s.nextWrite) } require.Equal(t, q.size, totalSize) diff --git a/tsdb/compact.go b/tsdb/compact.go index e09039cf33b..c2ae23b2e40 100644 --- a/tsdb/compact.go +++ b/tsdb/compact.go @@ -272,7 +272,7 @@ func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) { meta := dms[i].meta if meta.MaxTime-meta.MinTime < c.ranges[len(c.ranges)/2] { // If the block is entirely deleted, then we don't care about the block being big enough. - // TODO: This is assuming single tombstone is for distinct series, which might be no true. + // TODO: This is assuming a single tombstone is for a distinct series, which might not be true. if meta.Stats.NumTombstones > 0 && meta.Stats.NumTombstones >= meta.Stats.NumSeries { return []string{dms[i].dir}, nil } @@ -372,7 +372,7 @@ func splitByRange(ds []dirMeta, tr int64) [][]dirMeta { t0 = tr * ((m.MinTime - tr + 1) / tr) } // Skip blocks that don't fall into the range. This can happen via mis-alignment or - // by being the multiple of the intended range. + // by being a multiple of the intended range. if m.MaxTime > t0+tr { i++ continue @@ -395,7 +395,7 @@ func splitByRange(ds []dirMeta, tr int64) [][]dirMeta { return splitDirs } -// CompactBlockMetas merges many block metas into one, combining it's source blocks together +// CompactBlockMetas merges many block metas into one, combining its source blocks together // and adjusting compaction level. Min/Max time of result block meta covers all input blocks. func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { res := &BlockMeta{ @@ -833,7 +833,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa chksIter = s.Iterator(chksIter) chks = chks[:0] for chksIter.Next() { - // We are not iterating in streaming way over chunk as + // We are not iterating in a streaming way over chunks as // it's more efficient to do bulk write for index and // chunk file purposes. chks = append(chks, chksIter.At()) @@ -842,7 +842,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa return fmt.Errorf("chunk iter: %w", err) } - // Skip the series with all deleted chunks. + // Skip series with all deleted chunks. 
if len(chks) == 0 { continue } diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go index b2d2ea6e7f1..7a353a556a4 100644 --- a/tsdb/compact_test.go +++ b/tsdb/compact_test.go @@ -22,6 +22,7 @@ import ( "os" "path" "path/filepath" + "strconv" "sync" "testing" "time" @@ -1129,7 +1130,7 @@ func BenchmarkCompactionFromHead(b *testing.B) { for ln := 0; ln < labelNames; ln++ { app := h.Appender(context.Background()) for lv := 0; lv < labelValues; lv++ { - app.Append(0, labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0) + app.Append(0, labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)), 0, 0) } require.NoError(b, app.Commit()) } @@ -1161,7 +1162,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) { for ln := 0; ln < labelNames; ln++ { app := h.Appender(context.Background()) for lv := 0; lv < labelValues; lv++ { - lbls := labels.FromStrings(fmt.Sprintf("%d", ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)) + lbls := labels.FromStrings(strconv.Itoa(ln), fmt.Sprintf("%d%s%d", lv, postingsBenchSuffix, ln)) _, err = app.Append(0, lbls, int64(totalSamples), 0) require.NoError(b, err) for ts := 0; ts < totalSamples; ts++ { @@ -1297,7 +1298,7 @@ func TestCancelCompactions(t *testing.T) { // This checks that the `context.Canceled` error is properly checked at all levels: // - tsdb_errors.NewMulti() should have the Is() method implemented for correct checks. // - callers should check with errors.Is() instead of ==. - readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, log.NewNopLogger()) + readOnlyDB, err := OpenDBReadOnly(tmpdirCopy, "", log.NewNopLogger()) require.NoError(t, err) blocks, err := readOnlyDB.Blocks() require.NoError(t, err) diff --git a/tsdb/db.go b/tsdb/db.go index 22292ab16ec..5651b403e5c 100644 --- a/tsdb/db.go +++ b/tsdb/db.go @@ -189,8 +189,13 @@ type Options struct { // EnableSharding enables query sharding support in TSDB. EnableSharding bool + + // NewCompactorFunc is a function that returns a TSDB compactor. + NewCompactorFunc NewCompactorFunc } +type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) + type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} // DB handles reads and writes of time series falling into @@ -206,7 +211,7 @@ type DB struct { compactor Compactor blocksToDelete BlocksToDeleteFunc - // Mutex for that must be held when modifying the general block layout or lastGarbageCollectedMmapRef. + // mtx must be held when modifying the general block layout or lastGarbageCollectedMmapRef. mtx sync.RWMutex blocks []*Block @@ -383,26 +388,36 @@ var ErrClosed = errors.New("db already closed") // Current implementation doesn't support concurrency so // all API calls should happen in the same go routine. type DBReadOnly struct { - logger log.Logger - dir string - closers []io.Closer - closed chan struct{} + logger log.Logger + dir string + sandboxDir string + closers []io.Closer + closed chan struct{} } // OpenDBReadOnly opens DB in the given directory for read only operations. 
-func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { +func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { return nil, fmt.Errorf("opening the db dir: %w", err) } + if sandboxDirRoot == "" { + sandboxDirRoot = dir + } + sandboxDir, err := os.MkdirTemp(sandboxDirRoot, "tmp_dbro_sandbox") + if err != nil { + return nil, fmt.Errorf("setting up sandbox dir: %w", err) + } + if l == nil { l = log.NewNopLogger() } return &DBReadOnly{ - logger: l, - dir: dir, - closed: make(chan struct{}), + logger: l, + dir: dir, + sandboxDir: sandboxDir, + closed: make(chan struct{}), }, nil } @@ -491,7 +506,14 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue } opts := DefaultHeadOptions() - opts.ChunkDirRoot = db.dir + // Hard link the chunk files to a dir in db.sandboxDir in case the Head needs to truncate some of them + // or cut new ones while replaying the WAL. + // See https://github.com/prometheus/prometheus/issues/11618. + err = chunks.HardLinkChunkFiles(mmappedChunksDir(db.dir), mmappedChunksDir(db.sandboxDir)) + if err != nil { + return nil, err + } + opts.ChunkDirRoot = db.sandboxDir head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats()) if err != nil { return nil, err @@ -519,7 +541,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue } } opts := DefaultHeadOptions() - opts.ChunkDirRoot = db.dir + opts.ChunkDirRoot = db.sandboxDir head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) if err != nil { return nil, err @@ -690,8 +712,14 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { return block, nil } -// Close all block readers. +// Close all block readers and delete the sandbox dir. func (db *DBReadOnly) Close() error { + defer func() { + // Delete the temporary sandbox directory that was created when opening the DB. + if err := os.RemoveAll(db.sandboxDir); err != nil { + level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err) + } + }() select { case <-db.closed: return ErrClosed @@ -828,13 +856,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } ctx, cancel := context.WithCancel(context.Background()) - db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{ - MaxBlockChunkSegmentSize: opts.MaxBlockChunkSegmentSize, - EnableOverlappingCompaction: opts.EnableOverlappingCompaction, - }) + if opts.NewCompactorFunc != nil { + db.compactor, err = opts.NewCompactorFunc(ctx, r, l, rngs, db.chunkPool, opts) + } else { + db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{ + MaxBlockChunkSegmentSize: opts.MaxBlockChunkSegmentSize, + EnableOverlappingCompaction: opts.EnableOverlappingCompaction, + }) + } if err != nil { cancel() - return nil, fmt.Errorf("create leveled compactor: %w", err) + return nil, fmt.Errorf("create compactor: %w", err) } db.compactCancel = cancel @@ -1431,7 +1463,7 @@ func (db *DB) reloadBlocks() (err error) { db.metrics.reloads.Inc() }() - // Now that we reload TSDB every minute, there is high chance for race condition with a reload + // Now that we reload TSDB every minute, there is a high chance for a race condition with a reload // triggered by CleanTombstones(). We need to lock the reload to avoid the situation where // a normal reload and CleanTombstones try to delete the same block. 
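`open` now prefers `opts.NewCompactorFunc` over the built-in leveled compactor. A hedged sketch of supplying the hook when opening a DB; the delegation mirrors the default path shown above, and the option override is only an example:

```go
package main

import (
	"context"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	opts := tsdb.DefaultOptions()
	opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *tsdb.Options) (tsdb.Compactor, error) {
		// Delegate to the built-in leveled compactor with tweaked options;
		// any type satisfying tsdb.Compactor could be returned instead.
		return tsdb.NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, tsdb.LeveledCompactorOptions{
			MaxBlockChunkSegmentSize:    opts.MaxBlockChunkSegmentSize,
			EnableOverlappingCompaction: false, // example override
		})
	}

	db, err := tsdb.Open("data", log.NewNopLogger(), prometheus.NewRegistry(), opts, tsdb.NewDBStats())
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```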
db.mtx.Lock() diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 71b2f05ac73..69c9f60e323 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -25,6 +25,7 @@ import ( "os" "path" "path/filepath" + "runtime" "sort" "strconv" "sync" @@ -502,7 +503,7 @@ func TestAmendHistogramDatapointCausesError(t *testing.T) { _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 0) require.NoError(t, err) _, err = app.Append(0, labels.FromStrings("a", "b"), 0, 1) - require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err) + require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp) require.NoError(t, app.Rollback()) h := histogram.Histogram{ @@ -578,7 +579,7 @@ func TestNonDuplicateNaNDatapointsCausesAmendError(t *testing.T) { app = db.Appender(ctx) _, err = app.Append(0, labels.FromStrings("a", "b"), 0, math.Float64frombits(0x7ff0000000000002)) - require.Equal(t, storage.ErrDuplicateSampleForTimestamp, err) + require.ErrorIs(t, err, storage.ErrDuplicateSampleForTimestamp) } func TestEmptyLabelsetCausesError(t *testing.T) { @@ -1065,7 +1066,7 @@ func TestWALSegmentSizeOptions(t *testing.T) { for i := int64(0); i < 155; i++ { app := db.Appender(context.Background()) - ref, err := app.Append(0, labels.FromStrings("wal"+fmt.Sprintf("%d", i), "size"), i, rand.Float64()) + ref, err := app.Append(0, labels.FromStrings("wal"+strconv.Itoa(int(i)), "size"), i, rand.Float64()) require.NoError(t, err) for j := int64(1); j <= 78; j++ { _, err := app.Append(ref, labels.EmptyLabels(), i+j, rand.Float64()) @@ -2494,7 +2495,7 @@ func TestDBReadOnly(t *testing.T) { } // Open a read only db and ensure that the API returns the same result as the normal DB. - dbReadOnly, err := OpenDBReadOnly(dbDir, logger) + dbReadOnly, err := OpenDBReadOnly(dbDir, "", logger) require.NoError(t, err) defer func() { require.NoError(t, dbReadOnly.Close()) }() @@ -2548,10 +2549,14 @@ func TestDBReadOnly(t *testing.T) { // TestDBReadOnlyClosing ensures that after closing the db // all api methods return an ErrClosed. func TestDBReadOnlyClosing(t *testing.T) { - dbDir := t.TempDir() - db, err := OpenDBReadOnly(dbDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))) + sandboxDir := t.TempDir() + db, err := OpenDBReadOnly(t.TempDir(), sandboxDir, log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))) require.NoError(t, err) + // The sandboxDir was there. + require.DirExists(t, db.sandboxDir) require.NoError(t, db.Close()) + // The sandboxDir was deleted when closing. + require.NoDirExists(t, db.sandboxDir) require.Equal(t, db.Close(), ErrClosed) _, err = db.Blocks() require.Equal(t, err, ErrClosed) @@ -2587,7 +2592,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) { } // Flush WAL. - db, err := OpenDBReadOnly(dbDir, logger) + db, err := OpenDBReadOnly(dbDir, "", logger) require.NoError(t, err) flush := t.TempDir() @@ -2595,7 +2600,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) { require.NoError(t, db.Close()) // Reopen the DB from the flushed WAL block. 
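For callers, the new second argument to `OpenDBReadOnly` chooses where the sandbox directory lives; an empty string, as used in the tests here, falls back to the data directory itself. The sandbox receives hardlinks of the `chunks_head` files (via `HardLinkChunkFiles` above), so the read-only Head can truncate or cut chunks without touching the original data. A minimal sketch of a standalone inspection tool, with hypothetical paths:

```go
package main

import (
	"math"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// Place the sandbox on existing scratch space rather than inside the
	// data dir; passing "" would create it under "data" instead.
	db, err := tsdb.OpenDBReadOnly("data", "/tmp/scratch", log.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// Close deletes the sandbox directory again.
	defer db.Close()

	q, err := db.Querier(math.MinInt64, math.MaxInt64)
	if err != nil {
		panic(err)
	}
	defer q.Close()
	// ... run queries against q ...
}
```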
- db, err = OpenDBReadOnly(flush, logger) + db, err = OpenDBReadOnly(flush, "", logger) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) }() blocks, err := db.Blocks() @@ -2624,6 +2629,80 @@ require.Equal(t, 1000.0, sum) } +func TestDBReadOnly_Querier_NoAlteration(t *testing.T) { + countChunks := func(dir string) int { + files, err := os.ReadDir(mmappedChunksDir(dir)) + require.NoError(t, err) + return len(files) + } + + dirHash := func(dir string) (hash []byte) { + // Windows requires the DB to be closed: "xxx\lock: The process cannot access the file because it is being used by another process." + // But closing the DB alters the directory in this case (it'll cut a new chunk). + if runtime.GOOS != "windows" { + hash = testutil.DirHash(t, dir) + } + return + } + + spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) { + dBDirHash := dirHash(dir) + // Bootstrap a RO db from the same dir and set up a querier. + dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil) + require.NoError(t, err) + require.Equal(t, chunksCount, countChunks(dir)) + q, err := dbReadOnly.Querier(math.MinInt, math.MaxInt) + require.NoError(t, err) + require.NoError(t, q.Close()) + require.NoError(t, dbReadOnly.Close()) + // The RO Head doesn't alter RW db chunks_head/. + require.Equal(t, chunksCount, countChunks(dir)) + require.Equal(t, dirHash(dir), dBDirHash) + } + + t.Run("doesn't cut chunks while replaying WAL", func(t *testing.T) { + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + // Append until the first mmapped head chunk. + for i := 0; i < 121; i++ { + app := db.Appender(context.Background()) + _, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + spinUpQuerierAndCheck(db.dir, t.TempDir(), 0) + + // The RW Head should have no problem cutting its own chunk, + // this also proves that a chunk needed to be cut. + require.NotPanics(t, func() { db.ForceHeadMMap() }) + require.Equal(t, 1, countChunks(db.dir)) + }) + + t.Run("doesn't truncate corrupted chunks", func(t *testing.T) { + db := openTestDB(t, nil, nil) + require.NoError(t, db.Close()) + + // Simulate a corrupted chunk: without a header. + _, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001")) + require.NoError(t, err) + + spinUpQuerierAndCheck(db.dir, t.TempDir(), 1) + + // The RW Head should have no problem truncating its corrupted file: + // this proves that the chunk needed to be truncated. + db, err = Open(db.dir, nil, nil, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() + require.NoError(t, err) + require.Equal(t, 0, countChunks(db.dir)) + }) +} + func TestDBCannotSeePartialCommits(t *testing.T) { if defaultIsolationDisabled { t.Skip("skipping test since tsdb isolation is disabled") @@ -4495,7 +4574,7 @@ func TestOOOCompaction(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0) + require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples()) require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate. 
} checkNonEmptyOOOChunk(series1) @@ -4636,7 +4715,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0) + require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples()) } // If the normal Head is not compacted, the OOO head compaction does not take place. @@ -4737,7 +4816,7 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) { ms, created, err := db.head.getOrCreate(lbls.Hash(), lbls) require.NoError(t, err) require.False(t, created) - require.Greater(t, ms.ooo.oooHeadChunk.chunk.NumSamples(), 0) + require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples()) } // If the normal Head is not compacted, the OOO head compaction does not take place. @@ -5438,8 +5517,8 @@ func TestWBLAndMmapReplay(t *testing.T) { addedRecs++ require.NoError(t, newWbl.Log(rec)) } - require.Greater(t, markers, 0) - require.Greater(t, addedRecs, 0) + require.Positive(t, markers) + require.Positive(t, addedRecs) require.NoError(t, newWbl.Close()) require.NoError(t, sr.Close()) require.NoError(t, os.RemoveAll(wblDir)) @@ -7046,3 +7125,35 @@ func TestAbortBlockCompactions(t *testing.T) { require.True(t, db.head.compactable(), "head should be compactable") require.Equal(t, 4, compactions, "expected 4 compactions to be completed") } + +func TestNewCompactorFunc(t *testing.T) { + opts := DefaultOptions() + block1 := ulid.MustNew(1, nil) + block2 := ulid.MustNew(2, nil) + opts.NewCompactorFunc = func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) { + return &mockCompactorFn{ + planFn: func() ([]string, error) { + return []string{block1.String(), block2.String()}, nil + }, + compactFn: func() (ulid.ULID, error) { + return block1, nil + }, + writeFn: func() (ulid.ULID, error) { + return block2, nil + }, + }, nil + } + db := openTestDB(t, opts, nil) + defer func() { + require.NoError(t, db.Close()) + }() + plans, err := db.compactor.Plan("") + require.NoError(t, err) + require.Equal(t, []string{block1.String(), block2.String()}, plans) + ulid, err := db.compactor.Compact("", nil, nil) + require.NoError(t, err) + require.Equal(t, block1, ulid) + ulid, err = db.compactor.Write("", nil, 0, 1, nil) + require.NoError(t, err) + require.Equal(t, block2, ulid) +} diff --git a/tsdb/docs/format/head_chunks.md b/tsdb/docs/format/head_chunks.md index 813d4008ca3..5737f420589 100644 --- a/tsdb/docs/format/head_chunks.md +++ b/tsdb/docs/format/head_chunks.md @@ -27,10 +27,10 @@ in-file offset (lower 4 bytes) and segment sequence number (upper 4 bytes). # Chunk -Unlike chunks in the on-disk blocks, here we additionally store series -reference that the chunks belongs to and the mint/maxt of the chunks. This is -because we don't have an index associated with these chunks, hence these meta -information are used while replaying the chunks. +Unlike chunks in the on-disk blocks, here we additionally store the series +reference that each chunk belongs to and the mint/maxt of the chunks. This is +because we don't have an index associated with these chunks, hence this metadata +is used while replaying the chunks. 
``` ┌─────────────────────┬───────────────────────┬───────────────────────┬───────────────────┬───────────────┬──────────────┬────────────────┐ diff --git a/tsdb/docs/format/index.md b/tsdb/docs/format/index.md index 53b77d9abeb..e0ef21bd5bb 100644 --- a/tsdb/docs/format/index.md +++ b/tsdb/docs/format/index.md @@ -40,7 +40,7 @@ Most of the sections described below start with a `len` field. It always specifi ### Symbol Table -The symbol table holds a sorted list of deduplicated strings that occurred in label pairs of the stored series. They can be referenced from subsequent sections and significantly reduce the total index size. +The symbol table holds a sorted list of deduplicated strings that occur in label pairs of the stored series. They can be referenced from subsequent sections and significantly reduce the total index size. The section contains a sequence of the string entries, each prefixed with the string's length in raw bytes. All strings are utf-8 encoded. Strings are referenced by sequential indexing. The strings are sorted in lexicographically ascending order. diff --git a/tsdb/docs/usage.md b/tsdb/docs/usage.md index e70b248137c..7bc1ae6c5ff 100644 --- a/tsdb/docs/usage.md +++ b/tsdb/docs/usage.md @@ -1,6 +1,6 @@ # Usage -TSDB can be - and is - used by other applications such as [Cortex](https://cortexmetrics.io/) and [Thanos](https://thanos.io/). +TSDB can be - and is - used by other applications such as [Cortex](https://cortexmetrics.io/), [Thanos](https://thanos.io/), and [Grafana Mimir](https://grafana.com/oss/mimir/). This directory contains documentation for any developers who wish to work on or with TSDB. For a full example of instantiating a database, adding and querying data, see the [tsdb example in the docs](https://pkg.go.dev/github.com/prometheus/prometheus/tsdb). @@ -18,7 +18,7 @@ A `DB` has the following main components: * [`Head`](https://pkg.go.dev/github.com/prometheus/prometheus/tsdb#DB.Head) * [Blocks (persistent blocks)](https://pkg.go.dev/github.com/prometheus/prometheus/tsdb#DB.Blocks) -The `Head` is responsible for a lot. Here are its main components: +The `Head` is responsible for a lot. Here are its main components: * [WAL](https://pkg.go.dev/github.com/prometheus/prometheus/tsdb/wal#WAL) (Write Ahead Log). * [`stripeSeries`](https://github.com/prometheus/prometheus/blob/411021ada9ab41095923b8d2df9365b632fd40c3/tsdb/head.go#L1292): diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 3dd784c6234..7545ab9a602 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -37,7 +37,7 @@ const ( type CircularExemplarStorage struct { lock sync.RWMutex - exemplars []*circularBufferEntry + exemplars []circularBufferEntry nextIndex int metrics *ExemplarMetrics @@ -111,7 +111,7 @@ func NewExemplarMetrics(reg prometheus.Registerer) *ExemplarMetrics { return &m } -// NewCircularExemplarStorage creates an circular in memory exemplar storage. +// NewCircularExemplarStorage creates a circular in memory exemplar storage. // If we assume the average case 95 bytes per exemplar we can fit 5651272 exemplars in // 1GB of extra memory, accounting for the fact that this is heap allocated space. 
// If len <= 0, then the exemplar storage is essentially a noop storage but can later be @@ -121,7 +121,7 @@ func NewCircularExemplarStorage(length int64, m *ExemplarMetrics) (ExemplarStora length = 0 } c := &CircularExemplarStorage{ - exemplars: make([]*circularBufferEntry, length), + exemplars: make([]circularBufferEntry, length), index: make(map[string]*indexEntry, length/estimatedExemplarsPerSeries), metrics: m, } @@ -214,12 +214,12 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar. // Optimize by moving the lock to be per series (& benchmark it). ce.lock.RLock() defer ce.lock.RUnlock() - return ce.validateExemplar(seriesLabels, e, false) + return ce.validateExemplar(ce.index[string(seriesLabels)], e, false) } // Not thread safe. The appended parameters tells us whether this is an external validation, or internal // as a result of an AddExemplar call, in which case we should update any relevant metrics. -func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, appended bool) error { +func (ce *CircularExemplarStorage) validateExemplar(idx *indexEntry, e exemplar.Exemplar, appended bool) error { if len(ce.exemplars) == 0 { return storage.ErrExemplarsDisabled } @@ -239,8 +239,7 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp return err } - idx, ok := ce.index[string(key)] - if !ok { + if idx == nil { return nil } @@ -292,7 +291,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { oldBuffer := ce.exemplars oldNextIndex := int64(ce.nextIndex) - ce.exemplars = make([]*circularBufferEntry, l) + ce.exemplars = make([]circularBufferEntry, l) ce.index = make(map[string]*indexEntry, l/estimatedExemplarsPerSeries) ce.nextIndex = 0 @@ -311,10 +310,11 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { // This way we don't migrate exemplars that would just be overwritten when migrating later exemplars. startIndex := (oldNextIndex - count + int64(len(oldBuffer))) % int64(len(oldBuffer)) + var buf [1024]byte for i := int64(0); i < count; i++ { idx := (startIndex + i) % int64(len(oldBuffer)) - if entry := oldBuffer[idx]; entry != nil { - ce.migrate(entry) + if oldBuffer[idx].ref != nil { + ce.migrate(&oldBuffer[idx], buf[:]) migrated++ } } @@ -328,9 +328,8 @@ func (ce *CircularExemplarStorage) Resize(l int64) int { // migrate is like AddExemplar but reuses existing structs. Expected to be called in batch and requires // external lock and does not compute metrics. -func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) { - var buf [1024]byte - seriesLabels := entry.ref.seriesLabels.Bytes(buf[:]) +func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry, buf []byte) { + seriesLabels := entry.ref.seriesLabels.Bytes(buf[:0]) idx, ok := ce.index[string(seriesLabels)] if !ok { @@ -344,7 +343,7 @@ func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) { idx.newest = ce.nextIndex entry.next = noExemplar - ce.exemplars[ce.nextIndex] = entry + ce.exemplars[ce.nextIndex] = *entry ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars) } @@ -362,7 +361,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp ce.lock.Lock() defer ce.lock.Unlock() - err := ce.validateExemplar(seriesLabels, e, true) + idx, ok := ce.index[string(seriesLabels)] + err := ce.validateExemplar(idx, e, true) if err != nil { if errors.Is(err, storage.ErrDuplicateExemplar) { // Duplicate exemplar, noop. 
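The switch from `[]*circularBufferEntry` to `[]circularBufferEntry` across this file replaces one heap object per slot with a single flat buffer; slot occupancy is now signalled by `entry.ref != nil` rather than by a nil pointer in the slice. A toy illustration of the allocation difference (not Prometheus code; all names are made up):

```go
package main

import "fmt"

type entry struct {
	next int
	ref  *int // stands in for the index-entry pointer; nil means "empty slot"
}

func main() {
	// Pointer-based ring: the slice holds pointers, so filling it costs
	// one heap allocation per slot and scatters entries across the heap.
	ptrRing := make([]*entry, 1024)
	for i := range ptrRing {
		ptrRing[i] = &entry{}
	}

	// Value-based ring: one contiguous allocation for all slots; writes
	// overwrite slots in place and the GC has no extra pointers to trace.
	valRing := make([]entry, 1024)
	valRing[0] = entry{next: -1}

	fmt.Println(len(ptrRing), len(valRing))
}
```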
@@ -371,25 +371,23 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp return err } - _, ok := ce.index[string(seriesLabels)] if !ok { - ce.index[string(seriesLabels)] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l} + idx = &indexEntry{oldest: ce.nextIndex, seriesLabels: l} + ce.index[string(seriesLabels)] = idx } else { - ce.exemplars[ce.index[string(seriesLabels)].newest].next = ce.nextIndex + ce.exemplars[idx.newest].next = ce.nextIndex } - if prev := ce.exemplars[ce.nextIndex]; prev == nil { - ce.exemplars[ce.nextIndex] = &circularBufferEntry{} - } else { + if prev := &ce.exemplars[ce.nextIndex]; prev.ref != nil { // There exists an exemplar already on this ce.nextIndex entry, // drop it, to make place for others. - var buf [1024]byte - prevLabels := prev.ref.seriesLabels.Bytes(buf[:]) if prev.next == noExemplar { // Last item for this series, remove index entry. + var buf [1024]byte + prevLabels := prev.ref.seriesLabels.Bytes(buf[:]) delete(ce.index, string(prevLabels)) } else { - ce.index[string(prevLabels)].oldest = prev.next + prev.ref.oldest = prev.next } } @@ -397,8 +395,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp // since this is the first exemplar stored for this series. ce.exemplars[ce.nextIndex].next = noExemplar ce.exemplars[ce.nextIndex].exemplar = e - ce.exemplars[ce.nextIndex].ref = ce.index[string(seriesLabels)] - ce.index[string(seriesLabels)].newest = ce.nextIndex + ce.exemplars[ce.nextIndex].ref = idx + idx.newest = ce.nextIndex ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars) @@ -416,15 +414,15 @@ func (ce *CircularExemplarStorage) computeMetrics() { return } - if next := ce.exemplars[ce.nextIndex]; next != nil { + if ce.exemplars[ce.nextIndex].ref != nil { ce.metrics.exemplarsInStorage.Set(float64(len(ce.exemplars))) - ce.metrics.lastExemplarsTs.Set(float64(next.exemplar.Ts) / 1000) + ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[ce.nextIndex].exemplar.Ts) / 1000) return } // We did not yet fill the buffer. ce.metrics.exemplarsInStorage.Set(float64(ce.nextIndex)) - if ce.exemplars[0] != nil { + if ce.exemplars[0].ref != nil { ce.metrics.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts) / 1000) } } @@ -438,7 +436,7 @@ func (ce *CircularExemplarStorage) IterateExemplars(f func(seriesLabels labels.L idx := ce.nextIndex l := len(ce.exemplars) for i := 0; i < l; i, idx = i+1, (idx+1)%l { - if ce.exemplars[idx] == nil { + if ce.exemplars[idx].ref == nil { continue } err := f(ce.exemplars[idx].ref.seriesLabels, ce.exemplars[idx].exemplar) diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index 21030e4577e..7723ec38942 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -415,27 +415,29 @@ func BenchmarkAddExemplar(b *testing.B) { // before adding. 
exLabels := labels.FromStrings("trace_id", "89620921") - for _, n := range []int{10000, 100000, 1000000} { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - for j := 0; j < b.N; j++ { - b.StopTimer() - exs, err := NewCircularExemplarStorage(int64(n), eMetrics) - require.NoError(b, err) - es := exs.(*CircularExemplarStorage) - var l labels.Labels - b.StartTimer() - - for i := 0; i < n; i++ { - if i%100 == 0 { - l = labels.FromStrings("service", strconv.Itoa(i)) - } - err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels}) - if err != nil { - require.NoError(b, err) + for _, capacity := range []int{1000, 10000, 100000} { + for _, n := range []int{10000, 100000, 1000000} { + b.Run(fmt.Sprintf("%d/%d", n, capacity), func(b *testing.B) { + for j := 0; j < b.N; j++ { + b.StopTimer() + exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics) + require.NoError(b, err) + es := exs.(*CircularExemplarStorage) + var l labels.Labels + b.StartTimer() + + for i := 0; i < n; i++ { + if i%100 == 0 { + l = labels.FromStrings("service", strconv.Itoa(i)) + } + err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels}) + if err != nil { + require.NoError(b, err) + } } } - } - }) + }) + } } } @@ -480,8 +482,11 @@ func BenchmarkResizeExemplars(b *testing.B) { require.NoError(b, err) es := exs.(*CircularExemplarStorage) + var l labels.Labels for i := 0; i < int(float64(tc.startSize)*float64(1.5)); i++ { - l := labels.FromStrings("service", strconv.Itoa(i)) + if i%100 == 0 { + l = labels.FromStrings("service", strconv.Itoa(i)) + } err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i)}) if err != nil { diff --git a/tsdb/head.go b/tsdb/head.go index 8b3d9787ca7..d5f7144fdbc 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -310,12 +310,22 @@ func (h *Head) resetInMemoryState() error { return err } + if h.series != nil { + // Reset the existing series to make sure we call the appropriate hooks + // and increment the series removed metric. + fs := h.series.iterForDeletion(func(_ int, _ uint64, s *memSeries, flushedForCallback map[chunks.HeadSeriesRef]labels.Labels) { + // All series should be flushed. + flushedForCallback[s.ref] = s.lset + }) + h.metrics.seriesRemoved.Add(float64(fs)) + } + + h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback) h.iso = newIsolation(h.opts.IsolationDisabled) h.oooIso = newOOOIsolation() - + h.numSeries.Store(0) h.exemplarMetrics = em h.exemplars = es - h.series = newStripeSeries(h.opts.StripeSize, h.opts.SeriesCallback) h.postings = index.NewUnorderedMemPostings() h.tombstones = tombstones.NewMemTombstones() h.deleted = map[chunks.HeadSeriesRef]int{} @@ -1861,11 +1871,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. 
func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _, _ int64, minMmapFile int) { var ( - deleted = map[storage.SeriesRef]struct{}{} - rmChunks = 0 - actualMint int64 = math.MaxInt64 - minOOOTime int64 = math.MaxInt64 - deletedFromPrevStripe = 0 + deleted = map[storage.SeriesRef]struct{}{} + rmChunks = 0 + actualMint int64 = math.MaxInt64 + minOOOTime int64 = math.MaxInt64 ) minMmapFile = math.MaxInt32 @@ -1923,33 +1932,42 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( deletedForCallback[series.ref] = series.lset } - // Run through all series shard by shard, checking which should be deleted. + s.iterForDeletion(check) + + if actualMint == math.MaxInt64 { + actualMint = mint + } + + return deleted, rmChunks, actualMint, minOOOTime, minMmapFile +} + +// The iterForDeletion function iterates through all series, invoking the checkDeletedFunc for each. +// The checkDeletedFunc takes a map as input and should add to it all series that were deleted and should be included +// when invoking the PostDeletion hook. +func (s *stripeSeries) iterForDeletion(checkDeletedFunc func(int, uint64, *memSeries, map[chunks.HeadSeriesRef]labels.Labels)) int { + seriesSetFromPrevStripe := 0 + totalDeletedSeries := 0 + // Run through all series shard by shard for i := 0; i < s.size; i++ { - deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe) + seriesSet := make(map[chunks.HeadSeriesRef]labels.Labels, seriesSetFromPrevStripe) s.locks[i].Lock() - - // Delete conflicts first so seriesHashmap.del doesn't move them to the `unique` field, + // Iterate conflicts first so f doesn't move them to the `unique` field, // after deleting `unique`. for hash, all := range s.hashes[i].conflicts { for _, series := range all { - check(i, hash, series, deletedForCallback) + checkDeletedFunc(i, hash, series, seriesSet) } } + for hash, series := range s.hashes[i].unique { - check(i, hash, series, deletedForCallback) + checkDeletedFunc(i, hash, series, seriesSet) } - s.locks[i].Unlock() - - s.seriesLifecycleCallback.PostDeletion(deletedForCallback) - deletedFromPrevStripe = len(deletedForCallback) + s.seriesLifecycleCallback.PostDeletion(seriesSet) + totalDeletedSeries += len(seriesSet) + seriesSetFromPrevStripe = len(seriesSet) } - - if actualMint == math.MaxInt64 { - actualMint = mint - } - - return deleted, rmChunks, actualMint, minOOOTime, minMmapFile + return totalDeletedSeries } func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries { diff --git a/tsdb/head_append.go b/tsdb/head_append.go index efd573b4104..62c3727e28a 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -467,7 +467,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi // This only checks against the latest in-order sample. // The OOO headchunk has its own method to detect these duplicates. if math.Float64bits(s.lastValue) != math.Float64bits(v) { - return false, 0, storage.ErrDuplicateSampleForTimestamp + return false, 0, storage.NewDuplicateFloatErr(t, s.lastValue, v) } // Sample is identical (ts + value) with most current (highest ts) sample in sampleBuf. return false, 0, nil @@ -1467,8 +1467,8 @@ func (s *memSeries) mmapChunks(chunkDiskMapper *chunks.ChunkDiskMapper) (count i return } - // Write chunks starting from the oldest one and stop before we get to current s.headChunk. 
- // If we have this chain: s.headChunk{t4} -> t3 -> t2 -> t1 -> t0 + // Write chunks starting from the oldest one and stop before we get to current s.headChunks. + // If we have this chain: s.headChunks{t4} -> t3 -> t2 -> t1 -> t0 // then we need to write chunks t0 to t3, but skip s.headChunks. for i := s.headChunks.len() - 1; i > 0; i-- { chk := s.headChunks.atOffset(i) diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 45bbc81f182..df15abcd500 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -121,6 +121,10 @@ func (h *headIndexReader) Postings(ctx context.Context, name string, values ...s } } +func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings { + return h.head.postings.PostingsForLabelMatching(ctx, name, match) +} + func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { series := make([]*memSeries, 0, 128) diff --git a/tsdb/head_read_test.go b/tsdb/head_read_test.go index de97d70a565..8d835e943a1 100644 --- a/tsdb/head_read_test.go +++ b/tsdb/head_read_test.go @@ -14,6 +14,7 @@ package tsdb import ( + "context" "fmt" "sync" "testing" @@ -552,3 +553,25 @@ func TestMemSeries_chunk(t *testing.T) { }) } } + +func TestHeadIndexReader_PostingsForLabelMatching(t *testing.T) { + testPostingsForLabelMatching(t, 0, func(t *testing.T, series []labels.Labels) IndexReader { + opts := DefaultHeadOptions() + opts.ChunkRange = 1000 + opts.ChunkDirRoot = t.TempDir() + h, err := NewHead(nil, nil, nil, nil, opts, nil) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, h.Close()) + }) + app := h.Appender(context.Background()) + for _, s := range series { + app.Append(0, s, 0, 0) + } + require.NoError(t, app.Commit()) + + ir, err := h.Index() + require.NoError(t, err) + return ir + }) +} diff --git a/tsdb/head_test.go b/tsdb/head_test.go index d9631b3b910..bb437ab5984 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -3383,7 +3383,7 @@ func TestWaitForPendingReadersInTimeRange(t *testing.T) { func TestAppendHistogram(t *testing.T) { l := labels.FromStrings("a", "b") for _, numHistograms := range []int{1, 10, 150, 200, 250, 300} { - t.Run(fmt.Sprintf("%d", numHistograms), func(t *testing.T) { + t.Run(strconv.Itoa(numHistograms), func(t *testing.T) { head, _ := newTestHead(t, 1000, wlog.CompressionNone, false) t.Cleanup(func() { require.NoError(t, head.Close()) @@ -3557,7 +3557,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { expMmapChunks = append(expMmapChunks, &cpy) } expHeadChunkSamples := ms.headChunks.chunk.NumSamples() - require.Greater(t, expHeadChunkSamples, 0) + require.Positive(t, expHeadChunkSamples) // Series with mix of histograms and float. s2 := labels.FromStrings("a", "b2") @@ -3692,7 +3692,7 @@ func TestChunkSnapshot(t *testing.T) { e := ex{ seriesLabels: lbls, e: exemplar.Exemplar{ - Labels: labels.FromStrings("trace_id", fmt.Sprintf("%d", rand.Int())), + Labels: labels.FromStrings("trace_id", strconv.Itoa(rand.Int())), Value: rand.Float64(), Ts: ts, }, @@ -4007,6 +4007,9 @@ func TestSnapshotError(t *testing.T) { require.NoError(t, err) f, err := os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0) require.NoError(t, err) + // Create snapshot backup to be restored on future test cases. 
+ snapshotBackup, err := io.ReadAll(f) + require.NoError(t, err) _, err = f.WriteAt([]byte{0b11111111}, 18) require.NoError(t, err) require.NoError(t, f.Close()) @@ -4021,10 +4024,44 @@ func TestSnapshotError(t *testing.T) { // There should be no series in the memory after snapshot error since WAL was removed. require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal)) + require.Equal(t, uint64(0), head.NumSeries()) require.Nil(t, head.series.getByHash(lbls.Hash(), lbls)) tm, err = head.tombstones.Get(1) require.NoError(t, err) require.Empty(t, tm) + require.NoError(t, head.Close()) + + // Test corruption in the middle of the snapshot. + f, err = os.OpenFile(path.Join(snapDir, files[0].Name()), os.O_RDWR, 0) + require.NoError(t, err) + _, err = f.WriteAt(snapshotBackup, 0) + require.NoError(t, err) + _, err = f.WriteAt([]byte{0b11111111}, 300) + require.NoError(t, err) + require.NoError(t, f.Close()) + + c := &countSeriesLifecycleCallback{} + opts := head.opts + opts.SeriesCallback = c + + w, err = wlog.NewSize(nil, nil, head.wal.Dir(), 32768, wlog.CompressionNone) + require.NoError(t, err) + head, err = NewHead(prometheus.NewRegistry(), nil, w, nil, head.opts, nil) + require.NoError(t, err) + require.NoError(t, head.Init(math.MinInt64)) + + // There should be no series in the memory after snapshot error since WAL was removed. + require.Equal(t, 1.0, prom_testutil.ToFloat64(head.metrics.snapshotReplayErrorTotal)) + require.Nil(t, head.series.getByHash(lbls.Hash(), lbls)) + require.Equal(t, uint64(0), head.NumSeries()) + + // Since the snapshot could replay certain series, we continue invoking the create hooks. + // In such instances, we need to ensure that we also trigger the delete hooks when resetting the memory. + require.Equal(t, int64(2), c.created.Load()) + require.Equal(t, int64(2), c.deleted.Load()) + + require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesRemoved)) + require.Equal(t, 2.0, prom_testutil.ToFloat64(head.metrics.seriesCreated)) } func TestHistogramMetrics(t *testing.T) { @@ -4601,7 +4638,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { require.NoError(t, err) require.NotEqual(t, "", name) require.Equal(t, 0, idx) - require.Greater(t, offset, 0) + require.Positive(t, offset) } // TestWBLReplay checks the replay at a low level. @@ -5032,7 +5069,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) { require.Equal(t, expSamples, ms.headChunks.chunk.NumSamples()) } - newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", fmt.Sprintf("%d", idx)) } + newLabels := func(idx int) labels.Labels { return labels.FromStrings("foo", strconv.Itoa(idx)) } s1 := newLabels(1) appendSample(s1, 300) // At 300m. @@ -5829,3 +5866,14 @@ func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) { require.False(t, head.compactable()) } + +type countSeriesLifecycleCallback struct { + created atomic.Int64 + deleted atomic.Int64 +} + +func (c *countSeriesLifecycleCallback) PreCreation(labels.Labels) error { return nil } +func (c *countSeriesLifecycleCallback) PostCreation(labels.Labels) { c.created.Inc() } +func (c *countSeriesLifecycleCallback) PostDeletion(s map[chunks.HeadSeriesRef]labels.Labels) { + c.deleted.Add(int64(len(s))) +} diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 076768f4ef7..41f7dd46b21 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -1496,7 +1496,7 @@ Outer: } default: - // This is a record type we don't understand. 
It is either and old format from earlier versions, + // This is a record type we don't understand. It is either an old format from earlier versions, // or a new format and the code was rolled back to old version. loopErr = fmt.Errorf("unsupported snapshot record type 0b%b", rec[0]) break Outer diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 89c2041a765..480e6a8fc7a 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -51,6 +51,9 @@ const ( indexFilename = "index" seriesByteAlign = 16 + + // checkContextEveryNIterations is used in some tight loops to check if the context is done. + checkContextEveryNIterations = 128 ) type indexWriterSeries struct { @@ -158,7 +161,7 @@ type Writer struct { postingsEncoder PostingsEncoder } -// TOC represents index Table Of Content that states where each section of index starts. +// TOC represents the index Table Of Contents that states where each section of the index starts. type TOC struct { Symbols uint64 Series uint64 @@ -168,7 +171,7 @@ type TOC struct { PostingsTable uint64 } -// NewTOCFromByteSlice return parsed TOC from given index byte slice. +// NewTOCFromByteSlice returns a parsed TOC from the given index byte slice. func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { if bs.Len() < indexTOCLen { return nil, encoding.ErrInvalidSize @@ -1536,36 +1539,14 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe if len(e) == 0 { return nil, nil } - values := make([]string, 0, len(e)*symbolFactor) - d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil) - d.Skip(e[0].off) + values := make([]string, 0, len(e)*symbolFactor) lastVal := e[len(e)-1].value - - skip := 0 - for d.Err() == nil && ctx.Err() == nil { - if skip == 0 { - // These are always the same number of bytes, - // and it's faster to skip than parse. - skip = d.Len() - d.Uvarint() // Keycount. - d.UvarintBytes() // Label name. - skip -= d.Len() - } else { - d.Skip(skip) - } - s := yoloString(d.UvarintBytes()) // Label value. - values = append(values, s) - if s == lastVal { - break - } - d.Uvarint64() // Offset. - } - if d.Err() != nil { - return nil, fmt.Errorf("get postings offset entry: %w", d.Err()) - } - - return values, ctx.Err() + err := r.traversePostingOffsets(ctx, e[0].off, func(val string, _ uint64) (bool, error) { + values = append(values, val) + return val != lastVal, nil + }) + return values, err } // LabelNamesFor returns all the label names for the series referred to by IDs. @@ -1662,6 +1643,44 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, ch return nil } +// traversePostingOffsets traverses r's posting offsets table, starting at off, and calls cb with every label value and postings offset. +// If cb returns false (or an error), the traversing is interrupted. +func (r *Reader) traversePostingOffsets(ctx context.Context, off int, cb func(string, uint64) (bool, error)) error { + // Don't Crc32 the entire postings offset table, this is very slow + // so hope any issues were caught at startup. + d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil) + d.Skip(off) + skip := 0 + ctxErr := ctx.Err() + for d.Err() == nil && ctxErr == nil { + if skip == 0 { + // These are always the same number of bytes, + // and it's faster to skip than to parse. + skip = d.Len() + d.Uvarint() // Keycount. + d.UvarintBytes() // Label name. + skip -= d.Len() + } else { + d.Skip(skip) + } + v := yoloString(d.UvarintBytes()) // Label value. + postingsOff := d.Uvarint64() // Offset. 
+ if ok, err := cb(v, postingsOff); err != nil { + return err + } else if !ok { + break + } + ctxErr = ctx.Err() + } + if d.Err() != nil { + return fmt.Errorf("get postings offset entry: %w", d.Err()) + } + if ctxErr != nil { + return fmt.Errorf("get postings offset entry: %w", ctxErr) + } + return nil +} + func (r *Reader) Postings(ctx context.Context, name string, values ...string) (Postings, error) { if r.version == FormatV1 { e, ok := r.postingsV1[name] @@ -1696,7 +1715,6 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P slices.Sort(values) // Values must be in order so we can step through the table on disk. res := make([]Postings, 0, len(values)) - skip := 0 valueIndex := 0 for valueIndex < len(values) && values[valueIndex] < e[0].value { // Discard values before the start. @@ -1714,33 +1732,15 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P // Need to look from previous entry. i-- } - // Don't Crc32 the entire postings offset table, this is very slow - // so hope any issues were caught at startup. - d := encoding.NewDecbufAt(r.b, int(r.toc.PostingsTable), nil) - d.Skip(e[i].off) - - // Iterate on the offset table. - var postingsOff uint64 // The offset into the postings table. - for d.Err() == nil && ctx.Err() == nil { - if skip == 0 { - // These are always the same number of bytes, - // and it's faster to skip than parse. - skip = d.Len() - d.Uvarint() // Keycount. - d.UvarintBytes() // Label name. - skip -= d.Len() - } else { - d.Skip(skip) - } - v := d.UvarintBytes() // Label value. - postingsOff = d.Uvarint64() // Offset. - for string(v) >= value { - if string(v) == value { + + if err := r.traversePostingOffsets(ctx, e[i].off, func(val string, postingsOff uint64) (bool, error) { + for val >= value { + if val == value { // Read from the postings table. d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) _, p, err := r.dec.Postings(d2.Get()) if err != nil { - return nil, fmt.Errorf("decode postings: %w", err) + return false, fmt.Errorf("decode postings: %w", err) } res = append(res, p) } @@ -1752,18 +1752,75 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P } if i+1 == len(e) || value >= e[i+1].value || valueIndex == len(values) { // Need to go to a later postings offset entry, if there is one. - break + return false, nil } + return true, nil + }); err != nil { + return nil, err } - if d.Err() != nil { - return nil, fmt.Errorf("get postings offset entry: %w", d.Err()) + } + + return Merge(ctx, res...), nil +} + +func (r *Reader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { + if r.version == FormatV1 { + return r.postingsForLabelMatchingV1(ctx, name, match) + } + + e := r.postings[name] + if len(e) == 0 { + return EmptyPostings() + } + + lastVal := e[len(e)-1].value + var its []Postings + if err := r.traversePostingOffsets(ctx, e[0].off, func(val string, postingsOff uint64) (bool, error) { + if match(val) { + // We want this postings iterator since the value is a match + postingsDec := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) + _, p, err := r.dec.PostingsFromDecbuf(postingsDec) + if err != nil { + return false, fmt.Errorf("decode postings: %w", err) + } + its = append(its, p) } - if ctx.Err() != nil { - return nil, fmt.Errorf("get postings offset entry: %w", ctx.Err()) + return val != lastVal, nil + }); err != nil { + return ErrPostings(err) + } + + return Merge(ctx, its...) 
+} + +func (r *Reader) postingsForLabelMatchingV1(ctx context.Context, name string, match func(string) bool) Postings { + e := r.postingsV1[name] + if len(e) == 0 { + return EmptyPostings() + } + + var its []Postings + count := 1 + for val, offset := range e { + if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + return ErrPostings(ctx.Err()) + } + count++ + if !match(val) { + continue + } + + // Read from the postings table. + d := encoding.NewDecbufAt(r.b, int(offset), castagnoliTable) + _, p, err := r.dec.PostingsFromDecbuf(d) + if err != nil { + return ErrPostings(fmt.Errorf("decode postings: %w", err)) } + + its = append(its, p) } - return Merge(ctx, res...), nil + return Merge(ctx, its...) } // SortedPostings returns the given postings list reordered so that the backing series @@ -1856,6 +1913,11 @@ type Decoder struct { // Postings returns a postings list for b and its number of elements. func (dec *Decoder) Postings(b []byte) (int, Postings, error) { d := encoding.Decbuf{B: b} + return dec.PostingsFromDecbuf(d) +} + +// PostingsFromDecbuf returns a postings list for d and its number of elements. +func (dec *Decoder) PostingsFromDecbuf(d encoding.Decbuf) (int, Postings, error) { n := d.Be32int() l := d.Get() if d.Err() != nil { diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go index c451c38dd23..038caacf8e1 100644 --- a/tsdb/index/index_test.go +++ b/tsdb/index/index_test.go @@ -20,7 +20,9 @@ import ( "hash/crc32" "os" "path/filepath" + "slices" "sort" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -160,39 +162,14 @@ func TestIndexRW_Create_Open(t *testing.T) { } func TestIndexRW_Postings(t *testing.T) { - dir := t.TempDir() ctx := context.Background() - - fn := filepath.Join(dir, indexFilename) - - iw, err := NewWriter(context.Background(), fn) - require.NoError(t, err) - - series := []labels.Labels{ - labels.FromStrings("a", "1", "b", "1"), - labels.FromStrings("a", "1", "b", "2"), - labels.FromStrings("a", "1", "b", "3"), - labels.FromStrings("a", "1", "b", "4"), + var input indexWriterSeriesSlice + for i := 1; i < 5; i++ { + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings("a", "1", "b", strconv.Itoa(i)), + }) } - - require.NoError(t, iw.AddSymbol("1")) - require.NoError(t, iw.AddSymbol("2")) - require.NoError(t, iw.AddSymbol("3")) - require.NoError(t, iw.AddSymbol("4")) - require.NoError(t, iw.AddSymbol("a")) - require.NoError(t, iw.AddSymbol("b")) - - // Postings lists are only written if a series with the respective - // reference was added before. 
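`PostingsForLabelMatching` (added to both the file reader here and the head index reader earlier) fetches postings for every label value accepted by a predicate in a single pass: via `traversePostingOffsets` for v2 indexes, and via the map-based path with periodic context checks for v1. A hedged sketch of a caller, assuming an already-open `*index.Reader`:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/tsdb/index"
)

// matchingRefs prints the series refs whose value for the given label name
// satisfies the predicate; ir is an already-open index reader.
func matchingRefs(ctx context.Context, ir *index.Reader, name string) error {
	p := ir.PostingsForLabelMatching(ctx, name, func(v string) bool {
		return strings.HasPrefix(v, "node") // example predicate
	})
	for p.Next() {
		fmt.Println(p.At()) // storage.SeriesRef of a matching series
	}
	// Context cancellation surfaces here, wrapped via ErrPostings.
	return p.Err()
}
```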
- require.NoError(t, iw.AddSeries(1, series[0])) - require.NoError(t, iw.AddSeries(2, series[1])) - require.NoError(t, iw.AddSeries(3, series[2])) - require.NoError(t, iw.AddSeries(4, series[3])) - - require.NoError(t, iw.Close()) - - ir, err := NewFileReader(fn) - require.NoError(t, err) + ir, fn, _ := createFileReader(ctx, t, input) p, err := ir.Postings(ctx, "a", "1") require.NoError(t, err) @@ -205,7 +182,7 @@ func TestIndexRW_Postings(t *testing.T) { require.NoError(t, err) require.Empty(t, c) - testutil.RequireEqual(t, series[i], builder.Labels()) + testutil.RequireEqual(t, input[i].labels, builder.Labels()) } require.NoError(t, p.Err()) @@ -240,8 +217,6 @@ func TestIndexRW_Postings(t *testing.T) { "b": {"1", "2", "3", "4"}, }, labelIndices) - require.NoError(t, ir.Close()) - t.Run("ShardedPostings()", func(t *testing.T) { ir, err := NewFileReader(fn) require.NoError(t, err) @@ -296,42 +271,16 @@ func TestIndexRW_Postings(t *testing.T) { } func TestPostingsMany(t *testing.T) { - dir := t.TempDir() ctx := context.Background() - - fn := filepath.Join(dir, indexFilename) - - iw, err := NewWriter(context.Background(), fn) - require.NoError(t, err) - // Create a label in the index which has 999 values. - symbols := map[string]struct{}{} - series := []labels.Labels{} + var input indexWriterSeriesSlice for i := 1; i < 1000; i++ { v := fmt.Sprintf("%03d", i) - series = append(series, labels.FromStrings("i", v, "foo", "bar")) - symbols[v] = struct{}{} - } - symbols["i"] = struct{}{} - symbols["foo"] = struct{}{} - symbols["bar"] = struct{}{} - syms := []string{} - for s := range symbols { - syms = append(syms, s) - } - sort.Strings(syms) - for _, s := range syms { - require.NoError(t, iw.AddSymbol(s)) + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings("i", v, "foo", "bar"), + }) } - - for i, s := range series { - require.NoError(t, iw.AddSeries(storage.SeriesRef(i), s)) - } - require.NoError(t, iw.Close()) - - ir, err := NewFileReader(fn) - require.NoError(t, err) - defer func() { require.NoError(t, ir.Close()) }() + ir, _, symbols := createFileReader(ctx, t, input) cases := []struct { in []string @@ -387,25 +336,13 @@ func TestPostingsMany(t *testing.T) { } func TestPersistence_index_e2e(t *testing.T) { - dir := t.TempDir() ctx := context.Background() - lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000) require.NoError(t, err) - // Sort labels as the index writer expects series in sorted order. sort.Sort(labels.Slice(lbls)) - symbols := map[string]struct{}{} - for _, lset := range lbls { - lset.Range(func(l labels.Label) { - symbols[l.Name] = struct{}{} - symbols[l.Value] = struct{}{} - }) - } - var input indexWriterSeriesSlice - ref := uint64(0) // Generate ChunkMetas for every label set. for i, lset := range lbls { @@ -426,17 +363,7 @@ func TestPersistence_index_e2e(t *testing.T) { }) } - iw, err := NewWriter(context.Background(), filepath.Join(dir, indexFilename)) - require.NoError(t, err) - - syms := []string{} - for s := range symbols { - syms = append(syms, s) - } - sort.Strings(syms) - for _, s := range syms { - require.NoError(t, iw.AddSymbol(s)) - } + ir, _, _ := createFileReader(ctx, t, input) // Population procedure as done by compaction. var ( @@ -447,8 +374,6 @@ func TestPersistence_index_e2e(t *testing.T) { mi := newMockIndex() for i, s := range input { - err = iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...) 
- require.NoError(t, err) require.NoError(t, mi.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)) s.labels.Range(func(l labels.Label) { @@ -462,12 +387,6 @@ func TestPersistence_index_e2e(t *testing.T) { postings.Add(storage.SeriesRef(i), s.labels) } - err = iw.Close() - require.NoError(t, err) - - ir, err := NewFileReader(filepath.Join(dir, indexFilename)) - require.NoError(t, err) - for p := range mi.postings { gotp, err := ir.Postings(ctx, p.Name, p.Value) require.NoError(t, err) @@ -523,8 +442,6 @@ func TestPersistence_index_e2e(t *testing.T) { } sort.Strings(expSymbols) require.Equal(t, expSymbols, gotSymbols) - - require.NoError(t, ir.Close()) } func TestWriter_ShouldReturnErrorOnSeriesWithDuplicatedLabelNames(t *testing.T) { @@ -624,39 +541,14 @@ func BenchmarkReader_ShardedPostings(b *testing.B) { numShards = 16 ) - dir, err := os.MkdirTemp("", "benchmark_reader_sharded_postings") - require.NoError(b, err) - defer func() { - require.NoError(b, os.RemoveAll(dir)) - }() - ctx := context.Background() - - // Generate an index. - fn := filepath.Join(dir, indexFilename) - - iw, err := NewWriter(ctx, fn) - require.NoError(b, err) - - for i := 1; i <= numSeries; i++ { - require.NoError(b, iw.AddSymbol(fmt.Sprintf("%10d", i))) - } - require.NoError(b, iw.AddSymbol("const")) - require.NoError(b, iw.AddSymbol("unique")) - + var input indexWriterSeriesSlice for i := 1; i <= numSeries; i++ { - require.NoError(b, iw.AddSeries(storage.SeriesRef(i), - labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i)))) + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings("const", fmt.Sprintf("%10d", 1), "unique", fmt.Sprintf("%10d", i)), + }) } - - require.NoError(b, iw.Close()) - - b.ResetTimer() - - // Create a reader to read back all postings from the index. - ir, err := NewFileReader(fn) - require.NoError(b, err) - + ir, _, _ := createFileReader(ctx, b, input) b.ResetTimer() for n := 0; n < b.N; n++ { @@ -719,3 +611,64 @@ func TestChunksTimeOrdering(t *testing.T) { require.NoError(t, idx.Close()) } + +func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { + const seriesCount = 1000 + var input indexWriterSeriesSlice + for i := 1; i <= seriesCount; i++ { + input = append(input, &indexWriterSeries{ + labels: labels.FromStrings("__name__", fmt.Sprintf("%4d", i)), + chunks: []chunks.Meta{ + {Ref: 1, MinTime: 0, MaxTime: 10}, + }, + }) + } + ir, _, _ := createFileReader(context.Background(), t, input) + + failAfter := uint64(seriesCount / 2) // Fail after processing half of the series. + ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} + p := ir.PostingsForLabelMatching(ctx, "__name__", func(string) bool { + return true + }) + require.Error(t, p.Err()) + require.Equal(t, failAfter, ctx.Count()) +} + +// createFileReader creates a temporary index file. It writes the provided input to this file. +// It returns a Reader for this file, the file's name, and the symbol map. 
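+// The returned Reader is closed automatically via tb.Cleanup once the test or
+// benchmark finishes, so callers don't need to close it themselves.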
+func createFileReader(ctx context.Context, tb testing.TB, input indexWriterSeriesSlice) (*Reader, string, map[string]struct{}) { + tb.Helper() + + fn := filepath.Join(tb.TempDir(), indexFilename) + + iw, err := NewWriter(ctx, fn) + require.NoError(tb, err) + + symbols := map[string]struct{}{} + for _, s := range input { + s.labels.Range(func(l labels.Label) { + symbols[l.Name] = struct{}{} + symbols[l.Value] = struct{}{} + }) + } + + syms := []string{} + for s := range symbols { + syms = append(syms, s) + } + slices.Sort(syms) + for _, s := range syms { + require.NoError(tb, iw.AddSymbol(s)) + } + for i, s := range input { + require.NoError(tb, iw.AddSeries(storage.SeriesRef(i), s.labels, s.chunks...)) + } + require.NoError(tb, iw.Close()) + + ir, err := NewFileReader(fn) + require.NoError(tb, err) + tb.Cleanup(func() { + require.NoError(tb, ir.Close()) + }) + return ir, fn, symbols +} diff --git a/tsdb/index/postings.go b/tsdb/index/postings.go index 61a5560ee48..159f6416e2e 100644 --- a/tsdb/index/postings.go +++ b/tsdb/index/postings.go @@ -397,6 +397,41 @@ func (p *MemPostings) addFor(id storage.SeriesRef, l labels.Label) { } } +func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) Postings { + p.mtx.RLock() + + e := p.m[name] + if len(e) == 0 { + p.mtx.RUnlock() + return EmptyPostings() + } + + // Benchmarking shows that first copying the values into a slice and then matching over that is + // faster than matching over the map keys directly, at least on AMD64. + vals := make([]string, 0, len(e)) + for v, srs := range e { + if len(srs) > 0 { + vals = append(vals, v) + } + } + + var its []Postings + count := 1 + for _, v := range vals { + if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + p.mtx.RUnlock() + return ErrPostings(ctx.Err()) + } + count++ + if match(v) { + its = append(its, NewListPostings(e[v])) + } + } + p.mtx.RUnlock() + + return Merge(ctx, its...) +} + // ExpandPostings returns the postings expanded as a slice. func ExpandPostings(p Postings) (res []storage.SeriesRef, err error) { for p.Next() { diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go index 9e6bd23f8c8..2cbc14ac64e 100644 --- a/tsdb/index/postings_test.go +++ b/tsdb/index/postings_test.go @@ -22,12 +22,15 @@ import ( "math/rand" "sort" "strconv" + "strings" "testing" + "github.com/grafana/regexp" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/testutil" ) func TestMemPostings_addFor(t *testing.T) { @@ -49,7 +52,7 @@ func TestMemPostings_ensureOrder(t *testing.T) { for j := range l { l[j] = storage.SeriesRef(rand.Uint64()) } - v := fmt.Sprintf("%d", i) + v := strconv.Itoa(i) p.m["a"][v] = l } @@ -390,7 +393,7 @@ func BenchmarkMerge(t *testing.B) { its := make([]Postings, len(refs)) for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} { - t.Run(fmt.Sprint(nSeries), func(bench *testing.B) { + t.Run(strconv.Itoa(nSeries), func(bench *testing.B) { ctx := context.Background() for i := 0; i < bench.N; i++ { // Reset the ListPostings to their original values each time round the loop. 
@@ -1282,3 +1285,71 @@ func BenchmarkListPostings(b *testing.B) { }) } } + +func slowRegexpString() string { + nums := map[int]struct{}{} + for i := 10_000; i < 20_000; i++ { + if i%3 == 0 { + nums[i] = struct{}{} + } + } + + var sb strings.Builder + sb.WriteString(".*(9999") + for i := range nums { + sb.WriteString("|") + sb.WriteString(strconv.Itoa(i)) + } + sb.WriteString(").*") + return sb.String() +} + +func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) { + fast := regexp.MustCompile("^(100|200)$") + slowRegexp := "^" + slowRegexpString() + "$" + b.Logf("Slow regexp length = %d", len(slowRegexp)) + slow := regexp.MustCompile(slowRegexp) + + for _, labelValueCount := range []int{1_000, 10_000, 100_000} { + b.Run(fmt.Sprintf("labels=%d", labelValueCount), func(b *testing.B) { + mp := NewMemPostings() + for i := 0; i < labelValueCount; i++ { + mp.Add(storage.SeriesRef(i), labels.FromStrings("label", strconv.Itoa(i))) + } + + fp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString)) + require.NoError(b, err) + b.Logf("Fast matcher matches %d series", len(fp)) + b.Run("matcher=fast", func(b *testing.B) { + for i := 0; i < b.N; i++ { + mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString).Next() + } + }) + + sp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString)) + require.NoError(b, err) + b.Logf("Slow matcher matches %d series", len(sp)) + b.Run("matcher=slow", func(b *testing.B) { + for i := 0; i < b.N; i++ { + mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString).Next() + } + }) + }) + } +} + +func TestMemPostings_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { + memP := NewMemPostings() + seriesCount := 10 * checkContextEveryNIterations + for i := 1; i <= seriesCount; i++ { + memP.Add(storage.SeriesRef(i), labels.FromStrings("__name__", fmt.Sprintf("%4d", i))) + } + + failAfter := uint64(seriesCount / 2 / checkContextEveryNIterations) + ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} + p := memP.PostingsForLabelMatching(ctx, "__name__", func(string) bool { + return true + }) + require.Error(t, p.Err()) + require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. +} diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index ed0b3fd2278..af431d678f7 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -446,6 +446,10 @@ func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, return index.NewListPostings(ir.ch.postings), nil } +func (ir *OOOCompactionHeadIndexReader) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings { + return index.ErrPostings(errors.New("not supported")) +} + func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings { // This will already be sorted from the Postings() call above. return p diff --git a/tsdb/querier.go b/tsdb/querier.go index 8ebedfe52d8..1071c4a716d 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -33,6 +33,9 @@ import ( "github.com/prometheus/prometheus/util/annotations" ) +// checkContextEveryNIterations is used in some tight loops to check if the context is done. 
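+// Checking the context on every iteration would be needlessly expensive, so the
+// check is amortized; the trade-off is that a cancellation can go unnoticed for
+// up to checkContextEveryNIterations-1 iterations.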
+const checkContextEveryNIterations = 100 + type blockBaseQuerier struct { blockID ulid.ULID index IndexReader @@ -326,23 +329,8 @@ func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher) } } - vals, err := ix.LabelValues(ctx, m.Name) - if err != nil { - return nil, err - } - - var res []string - for _, val := range vals { - if m.Matches(val) { - res = append(res, val) - } - } - - if len(res) == 0 { - return index.EmptyPostings(), nil - } - - return ix.Postings(ctx, m.Name, res...) + it := ix.PostingsForLabelMatching(ctx, m.Name, m.Matches) + return it, it.Err() } // inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. @@ -368,12 +356,17 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma return nil, err } - var res []string - // If the inverse match is ="", we just want all the values. - if m.Type == labels.MatchEqual && m.Value == "" { + res := vals[:0] + // If the match before inversion was !="" or !~"", we just want all the values. + if m.Value == "" && (m.Type == labels.MatchRegexp || m.Type == labels.MatchEqual) { res = vals } else { + count := 1 for _, val := range vals { + if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + return nil, ctx.Err() + } + count++ if !m.Matches(val) { res = append(res, val) } @@ -402,7 +395,12 @@ func labelValuesWithMatchers(ctx context.Context, r IndexReader, name string, ma // re-use the allValues slice to avoid allocations // this is safe because the iteration is always ahead of the append filteredValues := allValues[:0] + count := 1 for _, v := range allValues { + if count%checkContextEveryNIterations == 0 && ctx.Err() != nil { + return nil, ctx.Err() + } + count++ if m.Matches(v) { filteredValues = append(filteredValues, v) } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index a293a983daf..c7e60a0e161 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -38,6 +38,7 @@ import ( "github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/annotations" + "github.com/prometheus/prometheus/util/testutil" ) // TODO(bwplotka): Replace those mocks with remote.concreteSeriesSet. @@ -2326,6 +2327,16 @@ func (m mockIndex) SortedPostings(p index.Postings) index.Postings { return index.NewListPostings(ep) } +func (m mockIndex) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings { + var res []index.Postings + for l, srs := range m.postings { + if l.Name == name && match(l.Value) { + res = append(res, index.NewListPostings(srs)) + } + } + return index.Merge(ctx, res...) +} + func (m mockIndex) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { out := make([]storage.SeriesRef, 0, 128) @@ -2797,6 +2808,13 @@ func TestPostingsForMatchers(t *testing.T) { }, }, // Not regex. 
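+ // i!~"" matches every series that has label "i" set, exercising the
+ // all-values fast path added to inversePostingsForMatcher above.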
+ { + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "i", "")}, + exp: []labels.Labels{ + labels.FromStrings("n", "1", "i", "a"), + labels.FromStrings("n", "1", "i", "b"), + }, + }, { matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^1$")}, exp: []labels.Labels{ @@ -3238,6 +3256,10 @@ func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]str return []string{}, nil } +func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings { + return index.ErrPostings(fmt.Errorf("PostingsForLabelMatching called")) +} + func TestPostingsForMatcher(t *testing.T) { ctx := context.Background() @@ -3624,3 +3646,77 @@ func TestQueryWithOneChunkCompletelyDeleted(t *testing.T) { require.NoError(t, css.Err()) require.Equal(t, 1, seriesCount) } + +func TestReader_PostingsForLabelMatchingHonorsContextCancel(t *testing.T) { + ir := mockReaderOfLabels{} + + failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations) + ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} + _, err := labelValuesWithMatchers(ctx, ir, "__name__", labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".+")) + + require.Error(t, err) + require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. +} + +func TestReader_InversePostingsForMatcherHonorsContextCancel(t *testing.T) { + ir := mockReaderOfLabels{} + + failAfter := uint64(mockReaderOfLabelsSeriesCount / 2 / checkContextEveryNIterations) + ctx := &testutil.MockContextErrAfter{FailAfter: failAfter} + _, err := inversePostingsForMatcher(ctx, ir, labels.MustNewMatcher(labels.MatchRegexp, "__name__", ".*")) + + require.Error(t, err) + require.Equal(t, failAfter+1, ctx.Count()) // Plus one for the Err() call that puts the error in the result. 
+}
+
+type mockReaderOfLabels struct{}
+
+const mockReaderOfLabelsSeriesCount = checkContextEveryNIterations * 10
+
+func (m mockReaderOfLabels) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
+ return make([]string, mockReaderOfLabelsSeriesCount), nil
+}
+
+func (m mockReaderOfLabels) LabelValueFor(context.Context, storage.SeriesRef, string) (string, error) {
+ panic("LabelValueFor called")
+}
+
+func (m mockReaderOfLabels) SortedLabelValues(context.Context, string, ...*labels.Matcher) ([]string, error) {
+ panic("SortedLabelValues called")
+}
+
+func (m mockReaderOfLabels) Close() error {
+ return nil
+}
+
+func (m mockReaderOfLabels) LabelNames(context.Context, ...*labels.Matcher) ([]string, error) {
+ panic("LabelNames called")
+}
+
+func (m mockReaderOfLabels) LabelNamesFor(context.Context, ...storage.SeriesRef) ([]string, error) {
+ panic("LabelNamesFor called")
+}
+
+func (m mockReaderOfLabels) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
+ panic("PostingsForLabelMatching called")
+}
+
+func (m mockReaderOfLabels) Postings(context.Context, string, ...string) (index.Postings, error) {
+ panic("Postings called")
+}
+
+func (m mockReaderOfLabels) ShardedPostings(index.Postings, uint64, uint64) index.Postings {
+ panic("ShardedPostings called")
+}
+
+func (m mockReaderOfLabels) SortedPostings(index.Postings) index.Postings {
+ panic("SortedPostings called")
+}
+
+func (m mockReaderOfLabels) Series(storage.SeriesRef, *labels.ScratchBuilder, *[]chunks.Meta) error {
+ panic("Series called")
+}
+
+func (m mockReaderOfLabels) Symbols() index.StringIter {
+ panic("Symbols called")
+}
diff --git a/tsdb/record/record.go b/tsdb/record/record.go
index 8a8409e55f9..c95b25f06ef 100644
--- a/tsdb/record/record.go
+++ b/tsdb/record/record.go
@@ -163,7 +163,7 @@ type RefMetadata struct {
 Help string
 }
 
-// RefExemplar is an exemplar with it's labels, timestamp, value the exemplar was collected/observed with, and a reference to a series.
+// RefExemplar is an exemplar with the labels, timestamp, value the exemplar was collected/observed with, and a reference to a series.
 type RefExemplar struct {
 Ref chunks.HeadSeriesRef
 T int64
@@ -798,7 +798,7 @@ func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b
 return buf.Get()
 }
 
-// Encode encodes the Float Histogram into a byte slice.
+// EncodeFloatHistogram encodes the Float Histogram into a byte slice.
 func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) {
 buf.PutByte(byte(h.CounterResetHint))
 
diff --git a/tsdb/wlog/checkpoint_test.go b/tsdb/wlog/checkpoint_test.go
index 279f7c4356b..a9786454de7 100644
--- a/tsdb/wlog/checkpoint_test.go
+++ b/tsdb/wlog/checkpoint_test.go
@@ -19,6 +19,7 @@ import (
 "os"
 "path/filepath"
 "sort"
+ "strconv"
 "strings"
 "testing"
 
@@ -232,10 +233,10 @@ func TestCheckpoint(t *testing.T) {
 // Write changing metadata for each series. In the end, only the latest
 // version should end up in the checkpoint.
b = enc.Metadata([]record.RefMetadata{
- {Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
- {Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
- {Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
- {Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
+ {Ref: 0, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+ {Ref: 1, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+ {Ref: 2, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
+ {Ref: 3, Unit: strconv.FormatInt(last, 10), Help: strconv.FormatInt(last, 10)},
 }, nil)
 require.NoError(t, w.Log(b))
 
@@ -250,7 +251,7 @@ func TestCheckpoint(t *testing.T) {
 require.NoError(t, w.Truncate(107))
 require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
 require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
- require.Greater(t, stats.DroppedSamples, 0)
+ require.Positive(t, stats.DroppedSamples)
 
 // Only the new checkpoint should be left.
 files, err := os.ReadDir(dir)
@@ -324,8 +325,8 @@ func TestCheckpoint(t *testing.T) {
 testutil.RequireEqual(t, expectedRefSeries, series)
 
 expectedRefMetadata := []record.RefMetadata{
- {Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
- {Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
+ {Ref: 0, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
+ {Ref: 2, Unit: strconv.FormatInt(last-100, 10), Help: strconv.FormatInt(last-100, 10)},
 {Ref: 4, Unit: "unit", Help: "help"},
 }
 sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go
index 8ebd9249aad..d836e3d868d 100644
--- a/tsdb/wlog/watcher.go
+++ b/tsdb/wlog/watcher.go
@@ -262,6 +262,11 @@ func (w *Watcher) loop() {
 // Run the watcher, which will tail the WAL until the quit channel is closed
 // or an error case is hit.
 func (w *Watcher) Run() error {
+ _, lastSegment, err := w.firstAndLast()
+ if err != nil {
+ return fmt.Errorf("wal.Segments: %w", err)
+ }
+
 // We want to ensure this is false across iterations since
 // Run will be called again if there was a failure to read the WAL.
 w.sendSamples = false
@@ -286,20 +291,14 @@ func (w *Watcher) Run() error {
 return err
 }
 
- level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment)
+ level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
 
 for !isClosed(w.quit) {
 w.currentSegmentMetric.Set(float64(currentSegment))
 
- // Re-check on each iteration in case a new segment was added,
- // because watch() will wait for notifications on the last segment.
- _, lastSegment, err := w.firstAndLast()
- if err != nil {
- return fmt.Errorf("wal.Segments: %w", err)
- }
- tail := currentSegment >= lastSegment
-
- level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment, "lastSegment", lastSegment)
- if err := w.watch(currentSegment, tail); err != nil && !errors.Is(err, ErrIgnorable) {
+ // On start, after reading the existing WAL for series records, we know which segment is currently the last one.
+ // On subsequent iterations, currentSegment will have been incremented and we should open that segment.
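+ // Segments at or beyond the last segment seen at startup are tailed (waiting
+ // for new writes); older segments are read through as fast as possible.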
+ level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment) + if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index ff006cb8174..b39a0fa8abf 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -17,6 +17,7 @@ import ( "math/rand" "os" "path" + "runtime" "sync" "testing" "time" @@ -698,11 +699,46 @@ func TestRun_StartupTime(t *testing.T) { } } +func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error { + enc := record.Encoder{} + for j := 0; j < seriesCount; j++ { + ref := j + (segment * 100) + series := enc.Series([]record.RefSeries{ + { + Ref: chunks.HeadSeriesRef(ref), + Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)), + }, + }, nil) + if err := w.Log(series); err != nil { + return err + } + + for k := 0; k < samplesCount; k++ { + inner := rand.Intn(ref + 1) + sample := enc.Samples([]record.RefSample{ + { + Ref: chunks.HeadSeriesRef(inner), + T: int64(segment), + V: float64(segment), + }, + }, nil) + if err := w.Log(sample); err != nil { + return err + } + } + } + return nil +} + func TestRun_AvoidNotifyWhenBehind(t *testing.T) { - const pageSize = 32 * 1024 - const segments = 10 - const seriesCount = 20 - const samplesCount = 300 + if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms. + t.SkipNow() + } + const segmentSize = pageSize // Smallest allowed segment size. + const segmentsToWrite = 5 + const segmentsToRead = segmentsToWrite - 1 + const seriesCount = 10 + const samplesCount = 50 // This test can take longer than intended to finish in cloud CI. readTimeout := 10 * time.Second @@ -715,73 +751,37 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) { err := os.Mkdir(wdir, 0o777) require.NoError(t, err) - enc := record.Encoder{} - w, err := NewSize(nil, nil, wdir, pageSize, compress) + w, err := NewSize(nil, nil, wdir, segmentSize, compress) require.NoError(t, err) var wg sync.WaitGroup - // add one segment initially to ensure there's a value > 0 for the last segment id - for i := 0; i < 1; i++ { - for j := 0; j < seriesCount; j++ { - ref := j + (i * 100) - series := enc.Series([]record.RefSeries{ - { - Ref: chunks.HeadSeriesRef(ref), - Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)), - }, - }, nil) - require.NoError(t, w.Log(series)) - - for k := 0; k < samplesCount; k++ { - inner := rand.Intn(ref + 1) - sample := enc.Samples([]record.RefSample{ - { - Ref: chunks.HeadSeriesRef(inner), - T: int64(i), - V: float64(i), - }, - }, nil) - require.NoError(t, w.Log(sample)) - } - } - } + // Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk. 
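+ // The remaining segments are written concurrently with the watcher (see the
+ // goroutine below); this first one is created synchronously so Run always
+ // starts with at least one segment on disk.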
+ require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount)) + w.NextSegment() // Force creation of the next segment wg.Add(1) go func() { defer wg.Done() - for i := 1; i < segments; i++ { - for j := 0; j < seriesCount; j++ { - ref := j + (i * 100) - series := enc.Series([]record.RefSeries{ - { - Ref: chunks.HeadSeriesRef(ref), - Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)), - }, - }, nil) - require.NoError(t, w.Log(series)) - - for k := 0; k < samplesCount; k++ { - inner := rand.Intn(ref + 1) - sample := enc.Samples([]record.RefSample{ - { - Ref: chunks.HeadSeriesRef(inner), - T: int64(i), - V: float64(i), - }, - }, nil) - require.NoError(t, w.Log(sample)) - } - } + for i := 1; i < segmentsToWrite; i++ { + require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount)) + w.NextSegment() } }() wt := newWriteToMock(time.Millisecond) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false) - watcher.MaxSegment = segments + watcher.MaxSegment = segmentsToRead watcher.setMetrics() startTime := time.Now() err = watcher.Run() wg.Wait() require.Less(t, time.Since(startTime), readTimeout) + + // But samples records shouldn't get dropped + retry(t, defaultRetryInterval, defaultRetries, func() bool { + return wt.checkNumSeries() > 0 + }) + require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended) + require.NoError(t, err) require.NoError(t, w.Close()) }) diff --git a/util/almost/almost.go b/util/almost/almost.go new file mode 100644 index 00000000000..34f1290a5fc --- /dev/null +++ b/util/almost/almost.go @@ -0,0 +1,41 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package almost + +import "math" + +var minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64. + +// Equal returns true if a and b differ by less than their sum +// multiplied by epsilon. +func Equal(a, b, epsilon float64) bool { + // NaN has no equality but for testing we still want to know whether both values + // are NaN. + if math.IsNaN(a) && math.IsNaN(b) { + return true + } + + // Cf. http://floating-point-gui.de/errors/comparison/ + if a == b { + return true + } + + absSum := math.Abs(a) + math.Abs(b) + diff := math.Abs(a - b) + + if a == 0 || b == 0 || absSum < minNormal { + return diff < epsilon*minNormal + } + return diff/math.Min(absSum, math.MaxFloat64) < epsilon +} diff --git a/util/testutil/context.go b/util/testutil/context.go index 3f63b030d7c..0c9e0f6f649 100644 --- a/util/testutil/context.go +++ b/util/testutil/context.go @@ -13,7 +13,12 @@ package testutil -import "time" +import ( + "context" + "time" + + "go.uber.org/atomic" +) // A MockContext provides a simple stub implementation of a Context. 
type MockContext struct { @@ -40,3 +45,23 @@ func (c *MockContext) Err() error { func (c *MockContext) Value(interface{}) interface{} { return nil } + +// MockContextErrAfter is a MockContext that will return an error after a certain +// number of calls to Err(). +type MockContextErrAfter struct { + MockContext + count atomic.Uint64 + FailAfter uint64 +} + +func (c *MockContextErrAfter) Err() error { + c.count.Inc() + if c.count.Load() >= c.FailAfter { + return context.Canceled + } + return c.MockContext.Err() +} + +func (c *MockContextErrAfter) Count() uint64 { + return c.count.Load() +} diff --git a/web/api/v1/api.go b/web/api/v1/api.go index dc22365073a..f0884926e11 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -116,9 +116,11 @@ type RulesRetriever interface { AlertingRules() []*rules.AlertingRule } +// StatsRenderer converts engine statistics into a format suitable for the API. type StatsRenderer func(context.Context, *stats.Statistics, string) stats.QueryStats -func defaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats { +// DefaultStatsRenderer is the default stats renderer for the API. +func DefaultStatsRenderer(_ context.Context, s *stats.Statistics, param string) stats.QueryStats { if param != "" { return stats.NewQueryStats(s) } @@ -272,7 +274,7 @@ func NewAPI( buildInfo: buildInfo, gatherer: gatherer, isAgent: isAgent, - statsRenderer: defaultStatsRenderer, + statsRenderer: DefaultStatsRenderer, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -461,7 +463,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { - sr = defaultStatsRenderer + sr = DefaultStatsRenderer } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) @@ -563,7 +565,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { // Optional stats field in response if parameter "stats" is not empty. 
sr := api.statsRenderer if sr == nil { - sr = defaultStatsRenderer + sr = DefaultStatsRenderer } qs := sr(ctx, qry.Stats(), r.FormValue("stats")) @@ -702,7 +704,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { names = []string{} } - if len(names) >= limit { + if len(names) > limit { names = names[:limit] warnings = warnings.Add(errors.New("results truncated due to limit")) } @@ -791,7 +793,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { slices.Sort(vals) - if len(vals) >= limit { + if len(vals) > limit { vals = vals[:limit] warnings = warnings.Add(errors.New("results truncated due to limit")) } @@ -887,7 +889,8 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { } metrics = append(metrics, set.At().Labels()) - if len(metrics) >= limit { + if len(metrics) > limit { + metrics = metrics[:limit] warnings.Add(errors.New("results truncated due to limit")) return apiFuncResult{metrics, nil, warnings, closer} } diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index bb2a73f6dbc..b30890893b7 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -25,6 +25,7 @@ import ( "reflect" "runtime" "sort" + "strconv" "strings" "testing" "time" @@ -49,6 +50,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" @@ -338,7 +340,7 @@ var sampleFlagMap = map[string]string{ } func TestEndpoints(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar"} 0+100x100 test_metric1{foo="boo"} 1+0x100 @@ -502,7 +504,7 @@ func (b byLabels) Less(i, j int) bool { return labels.Compare(b[i], b[j]) < 0 } func TestGetSeries(t *testing.T) { // TestEndpoints doesn't have enough label names to test api.labelNames // endpoint properly. Hence we test it separately. - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo1="bar", baz="abc"} 0+100x100 test_metric1{foo2="boo"} 1+0x100 @@ -606,7 +608,7 @@ func TestGetSeries(t *testing.T) { func TestQueryExemplars(t *testing.T) { start := time.Unix(0, 0) - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar"} 0+100x100 test_metric1{foo="boo"} 1+0x100 @@ -725,7 +727,7 @@ func TestQueryExemplars(t *testing.T) { func TestLabelNames(t *testing.T) { // TestEndpoints doesn't have enough label names to test api.labelNames // endpoint properly. Hence we test it separately. - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo1="bar", baz="abc"} 0+100x100 test_metric1{foo2="boo"} 1+0x100 @@ -1058,6 +1060,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E responseLen int // If nonzero, check only the length; `response` is ignored. responseMetadataTotal int responseAsJSON string + warningsCount int errType errorType sorter func(interface{}) metadata []targetMetadata @@ -1415,7 +1418,17 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E "match[]": []string{"test_metric1"}, "limit": []string{"1"}, }, - responseLen: 1, // API does not specify which particular value will come back. 
+ responseLen: 1, // API does not specify which particular value will come back. + warningsCount: 1, + }, + { + endpoint: api.series, + query: url.Values{ + "match[]": []string{"test_metric1"}, + "limit": []string{"2"}, + }, + responseLen: 2, // API does not specify which particular value will come back. + warningsCount: 0, // No warnings if limit isn't exceeded. }, // Missing match[] query params in series requests. { @@ -2698,7 +2711,19 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "limit": []string{"2"}, }, - responseLen: 2, // API does not specify which particular values will come back. + responseLen: 2, // API does not specify which particular values will come back. + warningsCount: 1, + }, + { + endpoint: api.labelValues, + params: map[string]string{ + "name": "__name__", + }, + query: url.Values{ + "limit": []string{"4"}, + }, + responseLen: 4, // API does not specify which particular values will come back. + warningsCount: 0, // No warnings if limit isn't exceeded. }, // Label names. { @@ -2845,7 +2870,16 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E query: url.Values{ "limit": []string{"2"}, }, - responseLen: 2, // API does not specify which particular values will come back. + responseLen: 2, // API does not specify which particular values will come back. + warningsCount: 1, + }, + { + endpoint: api.labelNames, + query: url.Values{ + "limit": []string{"3"}, + }, + responseLen: 3, // API does not specify which particular values will come back. + warningsCount: 0, // No warnings if limit isn't exceeded. }, }...) } @@ -2922,6 +2956,8 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E require.NoError(t, err) require.JSONEq(t, test.responseAsJSON, string(s)) } + + require.Len(t, res.warnings, test.warningsCount) }) } }) @@ -2937,8 +2973,10 @@ func assertAPIError(t *testing.T, got *apiError, exp errorType) { t.Helper() if exp == errorNone { + //nolint:testifylint require.Nil(t, got) } else { + //nolint:testifylint require.NotNil(t, got) require.Equal(t, exp, got.typ, "(%q)", got) } @@ -3543,7 +3581,7 @@ func TestTSDBStatus(t *testing.T) { }, } { tc := tc - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Run(strconv.Itoa(i), func(t *testing.T) { api := &API{db: tc.db, gatherer: prometheus.DefaultGatherer} endpoint := tc.endpoint(api) req, err := http.NewRequest(tc.method, fmt.Sprintf("?%s", tc.values.Encode()), nil) @@ -3835,7 +3873,7 @@ func TestExtractQueryOpts(t *testing.T) { // Test query timeout parameter. 
func TestQueryTimeout(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar"} 0+100x100 `) diff --git a/web/federate_test.go b/web/federate_test.go index f201210ec07..056a95d67f2 100644 --- a/web/federate_test.go +++ b/web/federate_test.go @@ -22,6 +22,7 @@ import ( "net/http" "net/http/httptest" "sort" + "strconv" "strings" "testing" "time" @@ -34,6 +35,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/teststorage" @@ -201,7 +203,7 @@ test_metric_without_labels{instance="baz"} 1001 6000000 } func TestFederation(t *testing.T) { - storage := promql.LoadedStorage(t, ` + storage := promqltest.LoadedStorage(t, ` load 1m test_metric1{foo="bar",instance="i"} 0+100x100 test_metric1{foo="boo",instance="i"} 1+0x100 @@ -340,8 +342,8 @@ func TestFederationWithNativeHistograms(t *testing.T) { } app := db.Appender(context.Background()) for i := 0; i < 6; i++ { - l := labels.FromStrings("__name__", "test_metric", "foo", fmt.Sprintf("%d", i)) - expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", fmt.Sprintf("%d", i)) + l := labels.FromStrings("__name__", "test_metric", "foo", strconv.Itoa(i)) + expL := labels.FromStrings("__name__", "test_metric", "instance", "", "foo", strconv.Itoa(i)) var err error switch i { case 0, 3: diff --git a/web/ui/embed.go b/web/ui/embed.go index 204ee9282af..1ee6dbc85a4 100644 --- a/web/ui/embed.go +++ b/web/ui/embed.go @@ -18,5 +18,5 @@ package ui import "embed" -//go:embed static/css/prom_console.css.gz static/js/prom_console.js.gz static/react/asset-manifest.json.gz static/react/favicon.ico.gz static/react/index.html.gz static/react/manifest.json.gz static/react/static/css/main.e075b686.css.gz static/react/static/js/main.87eaac7d.js.LICENSE.txt.gz static/react/static/js/main.87eaac7d.js.gz static/react/static/media/codicon.b3726f0165bf67ac6849.ttf.gz static/react/static/media/index.cd351d7c31d0d3fccf96.cjs.gz static/react/static/media/prometheus_logo_grey.3cf697e5443028ca5e5255b93c7906c5.svg.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.js.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.map.gz static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css.gz 
static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.eot.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.svg.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff2.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.eot.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.svg.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff2.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.eot.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.svg.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff2.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.eot.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.svg.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff2.gz static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.css.gz static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.less.gz static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.min.css.gz static/vendor/js/jquery-3.5.1.min.js.gz static/vendor/js/jquery.hotkeys.js.gz static/vendor/js/jquery.selection.js.gz static/vendor/js/popper.min.js.gz static/vendor/rickshaw/rickshaw.min.css.gz static/vendor/rickshaw/rickshaw.min.js.gz static/vendor/rickshaw/vendor/d3.layout.min.js.gz static/vendor/rickshaw/vendor/d3.v3.js.gz +//go:embed static/css/prom_console.css.gz static/js/prom_console.js.gz static/react/asset-manifest.json.gz static/react/favicon.ico.gz static/react/index.html.gz static/react/manifest.json.gz static/react/static/css/main.e075b686.css.gz static/react/static/js/main.b9e603c4.js.LICENSE.txt.gz static/react/static/js/main.b9e603c4.js.gz static/react/static/media/codicon.b3726f0165bf67ac6849.ttf.gz static/react/static/media/index.cd351d7c31d0d3fccf96.cjs.gz static/react/static/media/prometheus_logo_grey.3cf697e5443028ca5e5255b93c7906c5.svg.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap.css.map.gz static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.gz static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.gz 
static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.js.map.gz static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.gz static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.map.gz static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css.gz static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.eot.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.svg.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff2.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.eot.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.svg.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff2.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.eot.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.svg.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff.gz static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff2.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.eot.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.svg.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.ttf.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff.gz static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff2.gz static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.css.gz static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.less.gz static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.min.css.gz static/vendor/js/jquery-3.5.1.min.js.gz static/vendor/js/jquery.hotkeys.js.gz static/vendor/js/jquery.selection.js.gz static/vendor/js/popper.min.js.gz static/vendor/rickshaw/rickshaw.min.css.gz static/vendor/rickshaw/rickshaw.min.js.gz static/vendor/rickshaw/vendor/d3.layout.min.js.gz static/vendor/rickshaw/vendor/d3.v3.js.gz var EmbedFS embed.FS diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 96ebbd78fda..c9efe349134 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.52.0", + "version": "0.53.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.52.0", + "@prometheus-io/lezer-promql": "0.53.1", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts 
b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts index 0f1a8b80a93..7b20bfce38b 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.test.ts @@ -251,6 +251,12 @@ describe('analyzeCompletion test', () => { pos: 11, // cursor is between the bracket after the string myL expectedContext: [{ kind: ContextKind.LabelName }], }, + { + title: 'continue to autocomplete QuotedLabelName in aggregate modifier', + expr: 'sum by ("myL")', + pos: 12, // cursor is between the bracket after the string myL + expectedContext: [{ kind: ContextKind.LabelName }], + }, { title: 'autocomplete labelName in a list', expr: 'sum by (myLabel1,)', @@ -263,6 +269,12 @@ describe('analyzeCompletion test', () => { pos: 23, // cursor is between the bracket after the string myLab expectedContext: [{ kind: ContextKind.LabelName }], }, + { + title: 'autocomplete labelName in a list 2', + expr: 'sum by ("myLabel1", "myLab")', + pos: 27, // cursor is between the bracket after the string myLab + expectedContext: [{ kind: ContextKind.LabelName }], + }, { title: 'autocomplete labelName associated to a metric', expr: 'metric_name{}', @@ -299,6 +311,12 @@ describe('analyzeCompletion test', () => { pos: 22, // cursor is between the bracket after the comma expectedContext: [{ kind: ContextKind.LabelName, metricName: '' }], }, + { + title: 'continue to autocomplete quoted labelName associated to a metric', + expr: '{"metric_"}', + pos: 10, // cursor is between the bracket after the string metric_ + expectedContext: [{ kind: ContextKind.MetricName, metricName: 'metric_' }], + }, { title: 'autocomplete the labelValue with metricName + labelName', expr: 'metric_name{labelName=""}', @@ -342,6 +360,30 @@ describe('analyzeCompletion test', () => { }, ], }, + { + title: 'autocomplete the labelValue with metricName + quoted labelName', + expr: 'metric_name{labelName="labelValue", "labelName"!=""}', + pos: 50, // cursor is between the quotes + expectedContext: [ + { + kind: ContextKind.LabelValue, + metricName: 'metric_name', + labelName: 'labelName', + matchers: [ + { + name: 'labelName', + type: Neq, + value: '', + }, + { + name: 'labelName', + type: EqlSingle, + value: 'labelValue', + }, + ], + }, + ], + }, { title: 'autocomplete the labelValue associated to a labelName', expr: '{labelName=""}', @@ -427,6 +469,12 @@ describe('analyzeCompletion test', () => { pos: 22, // cursor is after '!' expectedContext: [{ kind: ContextKind.MatchOp }], }, + { + title: 'autocomplete matchOp 3', + expr: 'metric_name{"labelName"!}', + pos: 24, // cursor is after '!' 
+ expectedContext: [{ kind: ContextKind.BinOp }], + }, { title: 'autocomplete duration with offset', expr: 'http_requests_total offset 5', diff --git a/web/ui/module/codemirror-promql/src/complete/hybrid.ts b/web/ui/module/codemirror-promql/src/complete/hybrid.ts index cf23aa11ac1..46748d5dc37 100644 --- a/web/ui/module/codemirror-promql/src/complete/hybrid.ts +++ b/web/ui/module/codemirror-promql/src/complete/hybrid.ts @@ -29,7 +29,6 @@ import { GroupingLabels, Gte, Gtr, - LabelMatcher, LabelMatchers, LabelName, Lss, @@ -52,6 +51,9 @@ import { SubqueryExpr, Unless, VectorSelector, + UnquotedLabelMatcher, + QuotedLabelMatcher, + QuotedLabelName, } from '@prometheus-io/lezer-promql'; import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete'; import { EditorState } from '@codemirror/state'; @@ -181,7 +183,10 @@ export function computeStartCompletePosition(node: SyntaxNode, pos: number): num let start = node.from; if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) { start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos); - } else if (node.type.id === FunctionCallBody || (node.type.id === StringLiteral && node.parent?.type.id === LabelMatcher)) { + } else if ( + node.type.id === FunctionCallBody || + (node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher)) + ) { // When the cursor is between bracket, quote, we need to increment the starting position to avoid to consider the open bracket/ first string. start++; } else if ( @@ -212,7 +217,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context result.push({ kind: ContextKind.Duration }); break; } - if (node.parent?.type.id === LabelMatcher) { + if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) { // In this case the current token is not itself a valid match op yet: // metric_name{labelName!} result.push({ kind: ContextKind.MatchOp }); @@ -380,7 +385,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context // sum by (myL) // So we have to continue to autocomplete any kind of labelName result.push({ kind: ContextKind.LabelName }); - } else if (node.parent?.type.id === LabelMatcher) { + } else if (node.parent?.type.id === UnquotedLabelMatcher) { // In that case we are in the given situation: // metric_name{myL} or {myL} // so we have or to continue to autocomplete any kind of labelName or @@ -389,9 +394,9 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context } break; case StringLiteral: - if (node.parent?.type.id === LabelMatcher) { + if (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher) { // In this case we are in the given situation: - // metric_name{labelName=""} + // metric_name{labelName=""} or metric_name{"labelName"=""} // So we can autocomplete the labelValue // Get the labelName. 
@@ -399,18 +404,34 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context let labelName = ''; if (node.parent.firstChild?.type.id === LabelName) { labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to); + } else if (node.parent.firstChild?.type.id === QuotedLabelName) { + labelName = state.sliceDoc(node.parent.firstChild.from, node.parent.firstChild.to).slice(1, -1); } // then find the metricName if it exists const metricName = getMetricNameInVectorSelector(node, state); // finally get the full matcher available const matcherNode = walkBackward(node, LabelMatchers); - const labelMatchers = buildLabelMatchers(matcherNode ? matcherNode.getChildren(LabelMatcher) : [], state); + const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher]; + let labelMatchers: Matcher[] = []; + for (const labelMatcherOpt of labelMatcherOpts) { + labelMatchers = labelMatchers.concat(buildLabelMatchers(matcherNode ? matcherNode.getChildren(labelMatcherOpt) : [], state)); + } result.push({ kind: ContextKind.LabelValue, metricName: metricName, labelName: labelName, matchers: labelMatchers, }); + } else if (node.parent?.parent?.type.id === GroupingLabels) { + // In this case we are in the given situation: + // sum by ("myL") + // So we have to continue to autocomplete any kind of labelName + result.push({ kind: ContextKind.LabelName }); + } else if (node.parent?.parent?.type.id === LabelMatchers) { + // In that case we are in the given situation: + // {""} or {"metric_"} + // since this is for the QuotedMetricName we need to continue to autocomplete for the metric names + result.push({ kind: ContextKind.MetricName, metricName: state.sliceDoc(node.from, node.to).slice(1, -1) }); } break; case NumberLiteral: diff --git a/web/ui/module/codemirror-promql/src/parser/matcher.ts b/web/ui/module/codemirror-promql/src/parser/matcher.ts index f432ffe2879..99e2e396996 100644 --- a/web/ui/module/codemirror-promql/src/parser/matcher.ts +++ b/web/ui/module/codemirror-promql/src/parser/matcher.ts @@ -12,33 +12,75 @@ // limitations under the License. import { SyntaxNode } from '@lezer/common'; -import { EqlRegex, EqlSingle, LabelName, MatchOp, Neq, NeqRegex, StringLiteral } from '@prometheus-io/lezer-promql'; +import { + EqlRegex, + EqlSingle, + LabelName, + MatchOp, + Neq, + NeqRegex, + StringLiteral, + UnquotedLabelMatcher, + QuotedLabelMatcher, + QuotedLabelName, +} from '@prometheus-io/lezer-promql'; import { EditorState } from '@codemirror/state'; import { Matcher } from '../types'; function createMatcher(labelMatcher: SyntaxNode, state: EditorState): Matcher { const matcher = new Matcher(0, '', ''); const cursor = labelMatcher.cursor(); - if (!cursor.next()) { - // weird case, that would mean the labelMatcher doesn't have any child. - return matcher; - } - do { - switch (cursor.type.id) { - case LabelName: - matcher.name = state.sliceDoc(cursor.from, cursor.to); - break; - case MatchOp: - const ope = cursor.node.firstChild; - if (ope) { - matcher.type = ope.type.id; + switch (cursor.type.id) { + case QuotedLabelMatcher: + if (!cursor.next()) { + // weird case, that would mean the QuotedLabelMatcher doesn't have any child. 
diff --git a/web/ui/module/codemirror-promql/src/parser/parser.test.ts b/web/ui/module/codemirror-promql/src/parser/parser.test.ts
index 54b95553cbc..2bc7e67ff09 100644
--- a/web/ui/module/codemirror-promql/src/parser/parser.test.ts
+++ b/web/ui/module/codemirror-promql/src/parser/parser.test.ts
@@ -204,6 +204,11 @@ describe('promql operations', () => {
       expectedValueType: ValueType.vector,
       expectedDiag: [] as Diagnostic[],
     },
+    {
+      expr: 'foo and on(test,"blub") bar',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [] as Diagnostic[],
+    },
     {
       expr: 'foo and on() bar',
       expectedValueType: ValueType.vector,
@@ -214,6 +219,11 @@ describe('promql operations', () => {
       expectedValueType: ValueType.vector,
       expectedDiag: [] as Diagnostic[],
     },
+    {
+      expr: 'foo and ignoring(test,"blub") bar',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [] as Diagnostic[],
+    },
     {
       expr: 'foo and ignoring() bar',
       expectedValueType: ValueType.vector,
@@ -229,6 +239,11 @@ describe('promql operations', () => {
       expectedValueType: ValueType.vector,
       expectedDiag: [] as Diagnostic[],
     },
+    {
+      expr: 'foo / on(test,blub) group_left("bar") bar',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [] as Diagnostic[],
+    },
     {
       expr: 'foo / ignoring(test,blub) group_left(blub) bar',
       expectedValueType: ValueType.vector,
@@ -825,6 +840,134 @@ describe('promql operations', () => {
       expectedValueType: ValueType.vector,
       expectedDiag: [],
     },
+    {
+      expr: '{"foo"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      // with metric name in the middle
+      expr: '{a="b","foo",c=~"d"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"foo", a="bc"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"colon:in:the:middle"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"dot.in.the.middle"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"😀 in metric name"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      // quotes with escape
+      expr: '{"this is \"foo\" metric"}', // eslint-disable-line
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"foo","colon:in:the:middle"="val"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"foo","dot.in.the.middle"="val"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{"foo","😀 in label name"="val"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      // quotes with escape
+      expr: '{"foo","this is \"bar\" label"="val"}', // eslint-disable-line
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: 'foo{"bar"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [
+        {
+          from: 0,
+          message: 'metric name must not be set twice: foo or bar',
+          severity: 'error',
+          to: 10,
+        },
+      ],
+    },
+    {
+      expr: '{"foo", __name__="bar"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [
+        {
+          from: 0,
+          message: 'metric name must not be set twice: foo or bar',
+          severity: 'error',
+          to: 23,
+        },
+      ],
+    },
+    {
+      expr: '{"foo", "__name__"="bar"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [
+        {
+          from: 0,
+          message: 'metric name must not be set twice: foo or bar',
+          severity: 'error',
+          to: 25,
+        },
+      ],
+    },
+    {
+      expr: '{"__name__"="foo", __name__="bar"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [
+        {
+          from: 0,
+          message: 'metric name must not be set twice: foo or bar',
+          severity: 'error',
+          to: 34,
+        },
+      ],
+    },
+    {
+      expr: '{"foo", "bar"}',
+      expectedValueType: ValueType.vector,
+      expectedDiag: [
+        {
+          from: 0,
+          to: 14,
+          message: 'metric name must not be set twice: foo or bar',
+          severity: 'error',
+        },
+      ],
+    },
+    {
+      expr: `{'foo\`metric'='bar'}`, // eslint-disable-line
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
+    {
+      expr: '{`foo\"metric`=`bar`}', // eslint-disable-line
+      expectedValueType: ValueType.vector,
+      expectedDiag: [],
+    },
   ];
   testCases.forEach((value) => {
     const state = createEditorState(value.expr);
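(Editor's example, not part of the patch.) A quick way to sanity-check the selector spellings exercised above is to parse them and look for error nodes, which is roughly what drives the expectedDiag assertions. This sketch assumes the parser export of @prometheus-io/lezer-promql and @lezer/common tree iteration:

```ts
import { parser } from '@prometheus-io/lezer-promql';

// An expression parses cleanly if its tree contains no error nodes.
function parsesCleanly(expr: string): boolean {
  let ok = true;
  parser.parse(expr).iterate({
    enter(node) {
      if (node.type.isError) ok = false;
    },
  });
  return ok;
}

console.log(parsesCleanly('{"foo", a="bc"}')); // quoted metric name plus an ordinary matcher
console.log(parsesCleanly('{"foo", "bar"}')); // syntactically fine; the duplicate-name check below flags it
```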
diff --git a/web/ui/module/codemirror-promql/src/parser/parser.ts b/web/ui/module/codemirror-promql/src/parser/parser.ts
index 58e56185c93..fba7b7b6ba8 100644
--- a/web/ui/module/codemirror-promql/src/parser/parser.ts
+++ b/web/ui/module/codemirror-promql/src/parser/parser.ts
@@ -27,7 +27,6 @@ import {
   Gte,
   Gtr,
   Identifier,
-  LabelMatcher,
   LabelMatchers,
   Lss,
   Lte,
@@ -36,11 +35,14 @@ import {
   Or,
   ParenExpr,
   Quantile,
+  QuotedLabelMatcher,
+  QuotedLabelName,
   StepInvariantExpr,
   SubqueryExpr,
   Topk,
   UnaryExpr,
   Unless,
+  UnquotedLabelMatcher,
   VectorSelector,
 } from '@prometheus-io/lezer-promql';
 import { containsAtLeastOneChild } from './path-finder';
@@ -282,7 +284,11 @@ export class Parser {
 
   private checkVectorSelector(node: SyntaxNode): void {
     const matchList = node.getChild(LabelMatchers);
-    const labelMatchers = buildLabelMatchers(matchList ? matchList.getChildren(LabelMatcher) : [], this.state);
+    const labelMatcherOpts = [QuotedLabelName, QuotedLabelMatcher, UnquotedLabelMatcher];
+    let labelMatchers: Matcher[] = [];
+    for (const labelMatcherOpt of labelMatcherOpts) {
+      labelMatchers = labelMatchers.concat(buildLabelMatchers(matchList ? matchList.getChildren(labelMatcherOpt) : [], this.state));
+    }
     let vectorSelectorName = '';
     // VectorSelector ( Identifier )
     // https://github.com/promlabs/lezer-promql/blob/71e2f9fa5ae6f5c5547d5738966cd2512e6b99a8/src/promql.grammar#L200
@@ -301,6 +307,14 @@ export class Parser {
       // adding the metric name as a Matcher to avoid a false positive for this kind of expression:
       // foo{bare=''}
       labelMatchers.push(new Matcher(EqlSingle, '__name__', vectorSelectorName));
+    } else {
+      // When the metric name is not set outside the braces,
+      // check whether it is set twice inside them, as in:
+      // {__name__="foo", "foo"} or {"foo", "bar"}
+      const labelMatchersMetricName = labelMatchers.filter((lm) => lm.name === '__name__');
+      if (labelMatchersMetricName.length > 1) {
+        this.addDiagnostic(node, `metric name must not be set twice: ${labelMatchersMetricName[0].value} or ${labelMatchersMetricName[1].value}`);
+      }
     }
 
     // A Vector selector must contain at least one non-empty matcher to prevent
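(Editor's example, not part of the patch.) Condensed logic of the new else branch in checkVectorSelector: once the metric name can also be written inside the braces, two matchers on __name__ are a conflict. SketchMatcher is again a hypothetical stand-in for the module's Matcher:

```ts
type SketchMatcher = { name: string; value: string };

// With no metric name outside the braces, at most one matcher may target
// __name__, whether written as {"foo"} or as __name__="foo".
function duplicateMetricName(matchers: SketchMatcher[]): string | undefined {
  const names = matchers.filter((m) => m.name === '__name__');
  if (names.length > 1) {
    return `metric name must not be set twice: ${names[0].value} or ${names[1].value}`;
  }
  return undefined;
}

// Mirrors the {"foo", "bar"} test case above:
console.log(
  duplicateMetricName([
    { name: '__name__', value: 'foo' },
    { name: '__name__', value: 'bar' },
  ])
); // -> metric name must not be set twice: foo or bar
```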
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index 6734a770f3d..b6147ff11f9 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/lezer-promql",
-  "version": "0.52.0",
+  "version": "0.53.1",
   "description": "lezer-based PromQL grammar",
   "main": "dist/index.cjs",
   "type": "module",
diff --git a/web/ui/module/lezer-promql/src/promql.grammar b/web/ui/module/lezer-promql/src/promql.grammar
index 49664831793..fd4edddf24a 100644
--- a/web/ui/module/lezer-promql/src/promql.grammar
+++ b/web/ui/module/lezer-promql/src/promql.grammar
@@ -97,7 +97,7 @@ binModifiers {
 }
 
 GroupingLabels {
-  "(" (LabelName ("," LabelName)* ","?)? ")"
+  "(" ((LabelName | QuotedLabelName) ("," (LabelName | QuotedLabelName))* ","?)? ")"
 }
 
 FunctionCall {
@@ -220,7 +220,7 @@ VectorSelector {
 }
 
 LabelMatchers {
-  "{" (LabelMatcher ("," LabelMatcher)* ","?)? "}"
+  "{" ((UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName)("," (UnquotedLabelMatcher | QuotedLabelMatcher | QuotedLabelName))* ","?)? "}"
 }
 
 MatchOp {
@@ -230,8 +230,16 @@ MatchOp {
   NeqRegex
 }
 
-LabelMatcher {
-  LabelName MatchOp StringLiteral
+UnquotedLabelMatcher {
+  LabelName MatchOp StringLiteral
+}
+
+QuotedLabelMatcher {
+  QuotedLabelName MatchOp StringLiteral
+}
+
+QuotedLabelName {
+  StringLiteral
 }
 
 StepInvariantExpr {
diff --git a/web/ui/module/lezer-promql/test/expression.txt b/web/ui/module/lezer-promql/test/expression.txt
index 2e2b2f40bc1..daba7d8002f 100644
--- a/web/ui/module/lezer-promql/test/expression.txt
+++ b/web/ui/module/lezer-promql/test/expression.txt
@@ -112,6 +112,54 @@ PromQL(
   )
 )
 
+# Quoted label name in grouping labels
+
+sum by("job", mode) (test_metric) / on("job") group_left sum by("job")(test_metric)
+
+==>
+
+PromQL(
+  BinaryExpr(
+    AggregateExpr(
+      AggregateOp(Sum),
+      AggregateModifier(
+        By,
+        GroupingLabels(
+          QuotedLabelName(StringLiteral),
+          LabelName
+        )
+      ),
+      FunctionCallBody(
+        VectorSelector(
+          Identifier
+        )
+      )
+    ),
+    Div,
+    MatchingModifierClause(
+      On,
+      GroupingLabels(
+        QuotedLabelName(StringLiteral)
+      )
+      GroupLeft
+    ),
+    AggregateExpr(
+      AggregateOp(Sum),
+      AggregateModifier(
+        By,
+        GroupingLabels(
+          QuotedLabelName(StringLiteral)
+        )
+      ),
+      FunctionCallBody(
+        VectorSelector(
+          Identifier
+        )
+      )
+    )
+  )
+)
+
 # Case insensitivity for aggregations and binop modifiers.
 
 SuM BY(testlabel1) (testmetric1) / IGNOring(testlabel2) AVG withOUT(testlabel3) (testmetric2)
@@ -226,25 +274,25 @@ PromQL(
   VectorSelector(
     Identifier,
     LabelMatchers(
-      LabelMatcher(
-        LabelName,
-        MatchOp(EqlSingle),
-        StringLiteral
+      UnquotedLabelMatcher(
+        LabelName,
+        MatchOp(EqlSingle),
+        StringLiteral
       ),
-      LabelMatcher(
-        LabelName,
-        MatchOp(Neq),
-        StringLiteral
+      UnquotedLabelMatcher(
+        LabelName,
+        MatchOp(Neq),
+        StringLiteral
      ),
-      LabelMatcher(
-        LabelName,
-        MatchOp(EqlRegex),
-        StringLiteral
+      UnquotedLabelMatcher(
+        LabelName,
+        MatchOp(EqlRegex),
+        StringLiteral
      ),
-      LabelMatcher(
-        LabelName,
-        MatchOp(NeqRegex),
-        StringLiteral
+      UnquotedLabelMatcher(
+        LabelName,
+        MatchOp(NeqRegex),
+        StringLiteral
       )
     )
   )
@@ -571,14 +619,14 @@ PromQL(NumberLiteral)
 NaN{foo="bar"}
 
 ==>
 
-PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(LabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
+PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
 
 # Trying to illegally use Inf as a metric name.
 
 Inf{foo="bar"}
 
 ==>
 
-PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(LabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
+PromQL(BinaryExpr(NumberLiteral,⚠,VectorSelector(LabelMatchers(UnquotedLabelMatcher(LabelName,MatchOp(EqlSingle),StringLiteral)))))
 
 # Negative offset
@@ -614,3 +662,24 @@ MetricName(Identifier)
 ==>
 
 PromQL(BinaryExpr(NumberLiteral,Add,BinaryExpr(VectorSelector(Identifier),Atan2,VectorSelector(Identifier))))
+
+# Testing quoted metric name
+
+{"metric_name"}
+
+==>
+PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral))))
+
+# Testing quoted label name
+
+{"foo"="bar"}
+
+==>
+PromQL(VectorSelector(LabelMatchers(QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))
+
+# Testing quoted metric name and label name
+
+{"metric_name", "foo"="bar"}
+
+==>
+PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral), QuotedLabelMatcher(QuotedLabelName(StringLiteral), MatchOp(EqlSingle), StringLiteral))))
\ No newline at end of file
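(Editor's example, not part of the patch.) The tree shapes asserted in expression.txt can be reproduced by printing a parse directly, assuming the rebuilt grammar's parser export:

```ts
import { parser } from '@prometheus-io/lezer-promql';

// The rewritten LabelMatchers rule in action; the output should match the
// last test expectation above, along the lines of:
// PromQL(VectorSelector(LabelMatchers(QuotedLabelName(StringLiteral),
//   QuotedLabelMatcher(QuotedLabelName(StringLiteral),MatchOp(EqlSingle),StringLiteral))))
console.log(parser.parse('{"metric_name", "foo"="bar"}').toString());
```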
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 30fa41adf7d..8a473e327f9 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "prometheus-io",
-  "version": "0.52.0",
+  "version": "0.53.1",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "prometheus-io",
-      "version": "0.52.0",
+      "version": "0.53.1",
       "workspaces": [
         "react-app",
         "module/*"
@@ -30,10 +30,10 @@
     },
     "module/codemirror-promql": {
       "name": "@prometheus-io/codemirror-promql",
-      "version": "0.52.0",
+      "version": "0.53.1",
       "license": "Apache-2.0",
       "dependencies": {
-        "@prometheus-io/lezer-promql": "0.52.0",
+        "@prometheus-io/lezer-promql": "0.53.1",
         "lru-cache": "^7.18.3"
       },
       "devDependencies": {
@@ -69,7 +69,7 @@
     },
     "module/lezer-promql": {
       "name": "@prometheus-io/lezer-promql",
-      "version": "0.52.0",
+      "version": "0.53.1",
       "license": "Apache-2.0",
       "devDependencies": {
         "@lezer/generator": "^1.5.1",
@@ -19233,7 +19233,7 @@
     },
     "react-app": {
       "name": "@prometheus-io/app",
-      "version": "0.52.0",
+      "version": "0.53.1",
       "dependencies": {
         "@codemirror/autocomplete": "^6.11.1",
         "@codemirror/commands": "^6.3.2",
@@ -19251,7 +19251,7 @@
         "@lezer/lr": "^1.3.14",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.52.0",
+        "@prometheus-io/codemirror-promql": "0.53.1",
         "bootstrap": "^4.6.2",
         "css.escape": "^1.5.1",
         "downshift": "^7.6.2",
"downshift": "^7.6.2", diff --git a/web/ui/package.json b/web/ui/package.json index e3899ec78eb..d59e294e2ab 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.52.0" + "version": "0.53.1" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 3e79da949c2..90381cba57a 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/app", - "version": "0.52.0", + "version": "0.53.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.52.0", + "@prometheus-io/codemirror-promql": "0.53.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", diff --git a/web/ui/static/css/prom_console.css.gz b/web/ui/static/css/prom_console.css.gz index 03132d1c640..f3f6d9979ee 100644 Binary files a/web/ui/static/css/prom_console.css.gz and b/web/ui/static/css/prom_console.css.gz differ diff --git a/web/ui/static/js/prom_console.js.gz b/web/ui/static/js/prom_console.js.gz index 2d4f8833804..8446e81bdad 100644 Binary files a/web/ui/static/js/prom_console.js.gz and b/web/ui/static/js/prom_console.js.gz differ diff --git a/web/ui/static/react/asset-manifest.json.gz b/web/ui/static/react/asset-manifest.json.gz index e65b723f3c4..5ae5af0dc54 100644 Binary files a/web/ui/static/react/asset-manifest.json.gz and b/web/ui/static/react/asset-manifest.json.gz differ diff --git a/web/ui/static/react/favicon.ico.gz b/web/ui/static/react/favicon.ico.gz index d9ee38834ef..f782747de7c 100755 Binary files a/web/ui/static/react/favicon.ico.gz and b/web/ui/static/react/favicon.ico.gz differ diff --git a/web/ui/static/react/index.html.gz b/web/ui/static/react/index.html.gz index 3a5ae174102..bec0dfea3f8 100644 Binary files a/web/ui/static/react/index.html.gz and b/web/ui/static/react/index.html.gz differ diff --git a/web/ui/static/react/manifest.json.gz b/web/ui/static/react/manifest.json.gz index e06cac22652..ddb7a77c185 100755 Binary files a/web/ui/static/react/manifest.json.gz and b/web/ui/static/react/manifest.json.gz differ diff --git a/web/ui/static/react/static/css/main.e075b686.css.gz b/web/ui/static/react/static/css/main.e075b686.css.gz index 834a88740d3..43f933d542f 100644 Binary files a/web/ui/static/react/static/css/main.e075b686.css.gz and b/web/ui/static/react/static/css/main.e075b686.css.gz differ diff --git a/web/ui/static/react/static/js/main.87eaac7d.js.gz b/web/ui/static/react/static/js/main.87eaac7d.js.gz deleted file mode 100644 index a9f5cebe43d..00000000000 Binary files a/web/ui/static/react/static/js/main.87eaac7d.js.gz and /dev/null differ diff --git a/web/ui/static/react/static/js/main.87eaac7d.js.LICENSE.txt.gz b/web/ui/static/react/static/js/main.b9e603c4.js.LICENSE.txt.gz similarity index 91% rename from web/ui/static/react/static/js/main.87eaac7d.js.LICENSE.txt.gz rename to web/ui/static/react/static/js/main.b9e603c4.js.LICENSE.txt.gz index 8aff1e0141f..90c5c6514f6 100644 Binary files a/web/ui/static/react/static/js/main.87eaac7d.js.LICENSE.txt.gz and b/web/ui/static/react/static/js/main.b9e603c4.js.LICENSE.txt.gz differ diff --git a/web/ui/static/react/static/js/main.b9e603c4.js.gz b/web/ui/static/react/static/js/main.b9e603c4.js.gz new file mode 100644 index 00000000000..f0c70f10d77 Binary files /dev/null and 
diff --git a/web/ui/static/react/static/media/codicon.b3726f0165bf67ac6849.ttf.gz b/web/ui/static/react/static/media/codicon.b3726f0165bf67ac6849.ttf.gz
index 18ae89586ec..6b761cae1ef 100644
Binary files a/web/ui/static/react/static/media/codicon.b3726f0165bf67ac6849.ttf.gz and b/web/ui/static/react/static/media/codicon.b3726f0165bf67ac6849.ttf.gz differ
diff --git a/web/ui/static/react/static/media/index.cd351d7c31d0d3fccf96.cjs.gz b/web/ui/static/react/static/media/index.cd351d7c31d0d3fccf96.cjs.gz
index b0c544fa86f..0702c16e665 100644
Binary files a/web/ui/static/react/static/media/index.cd351d7c31d0d3fccf96.cjs.gz and b/web/ui/static/react/static/media/index.cd351d7c31d0d3fccf96.cjs.gz differ
diff --git a/web/ui/static/react/static/media/prometheus_logo_grey.3cf697e5443028ca5e5255b93c7906c5.svg.gz b/web/ui/static/react/static/media/prometheus_logo_grey.3cf697e5443028ca5e5255b93c7906c5.svg.gz
index 0ff4d82ad50..eb93377dad4 100644
Binary files a/web/ui/static/react/static/media/prometheus_logo_grey.3cf697e5443028ca5e5255b93c7906c5.svg.gz and b/web/ui/static/react/static/media/prometheus_logo_grey.3cf697e5443028ca5e5255b93c7906c5.svg.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.gz
index a55e782b29e..95b8d20668a 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.map.gz
index e03f704ae19..43f8df8db12 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.css.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.gz
index 866ef360b82..b7cca6be276 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.map.gz
index 657e1766c20..562c3ed7910 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-grid.min.css.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.gz
index d53ea6181e2..09cbccbfb44 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.map.gz
index a0604476c0f..420de4a26fe 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.css.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.gz
index 3fd799d0314..25c56cd2be5 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.map.gz
index 9314c219694..6d4aeb3cb50 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap-reboot.min.css.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.gz
index a2378671b47..c5a36cf42e9 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.map.gz
index cf523e3d23c..7b960bc57c6 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.css.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.gz
index f49754eefef..fdc13a588e8 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.map.gz
index 3c481152866..9b905baea24 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.gz
index b2a7dafe105..b09da536170 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.map.gz
index 1512e97c477..5c0680dc820 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.js.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.gz
index 1cff50d948f..f6684639ffc 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.map.gz
index dcada32b420..cf565cc3130 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.bundle.min.js.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.gz
index 00e0878773f..80ac8474749 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.map.gz
index cd590eaa1ae..cc9dac885dc 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.js.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.gz
index 6b667f1c6b4..b8d5ea615cf 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.gz differ
diff --git a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.map.gz b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.map.gz
index 98ec942b595..17610edffbe 100644
Binary files a/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.map.gz and b/web/ui/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js.map.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css.gz b/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css.gz
index 3adfcbb320c..577ce5113ba 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css.gz b/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css.gz
index bccfa27c705..cf6dd6bc38d 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.eot.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.eot.gz
index 500e6b41d58..8e89edd5e88 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.eot.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.eot.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.svg.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.svg.gz
index 6d970d4ca9b..4c27a288da2 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.svg.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.svg.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.ttf.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.ttf.gz
index 16130c3c9d0..7930bcde9b1 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.ttf.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.ttf.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff.gz
index 305af92494b..c40b5f596d7 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff2.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff2.gz
index 0af81c70bd1..955bb709a55 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff2.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-brands-400.woff2.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.eot.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.eot.gz
index d967be1d7f1..eea860555ba 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.eot.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.eot.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.svg.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.svg.gz
index 787d2e88cb4..804ce0065dc 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.svg.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.svg.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.ttf.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.ttf.gz
index eefce7f31c0..e7f30356a31 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.ttf.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.ttf.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff.gz
index d3b5944bdd2..e331634e2a5 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff2.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff2.gz
index 11772b049ba..2f52ab1b6a7 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff2.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-regular-400.woff2.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.eot.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.eot.gz
index c4d195b95fc..51b10078cbf 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.eot.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.eot.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.svg.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.svg.gz
index 413208023cd..1ef0c27d505 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.svg.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.svg.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.ttf.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.ttf.gz
index 37e1d26fead..5d36c394a7e 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.ttf.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.ttf.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff.gz
index 7039278dcac..857b578b5a9 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff2.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff2.gz
index 863a0d761d6..0c7bff8754c 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff2.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/fontawesome/fa-solid-900.woff2.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.eot.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.eot.gz
index 509c1b2a258..15df5b247c5 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.eot.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.eot.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.svg.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.svg.gz
index a52bf9ce5cb..bbf9cd69a52 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.svg.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.svg.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.ttf.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.ttf.gz
index 868923fa678..f1e74a3abdb 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.ttf.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.ttf.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff.gz
index d212ce9c9bf..f167a0b7c28 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff2.gz b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff2.gz
index 7f0df988757..4fb443a647a 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff2.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/fonts/glyphicons/glyphicons-halflings-regular.woff2.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.css.gz b/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.css.gz
index 337047f5737..1b5766f05a7 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.css.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.css.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.less.gz b/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.less.gz
index 9271d917188..e5b7d940091 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.less.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.less.gz differ
diff --git a/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.min.css.gz b/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.min.css.gz
index 08e5890252e..db963c77a0a 100644
Binary files a/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.min.css.gz and b/web/ui/static/vendor/bootstrap4-glyphicons/maps/glyphicons-fontawesome.min.css.gz differ
diff --git a/web/ui/static/vendor/js/jquery-3.5.1.min.js.gz b/web/ui/static/vendor/js/jquery-3.5.1.min.js.gz
index cc52399513d..7f86223b4d0 100644
Binary files a/web/ui/static/vendor/js/jquery-3.5.1.min.js.gz and b/web/ui/static/vendor/js/jquery-3.5.1.min.js.gz differ
diff --git a/web/ui/static/vendor/js/jquery.hotkeys.js.gz b/web/ui/static/vendor/js/jquery.hotkeys.js.gz
index 717fdf8f478..d50e58811f9 100644
Binary files a/web/ui/static/vendor/js/jquery.hotkeys.js.gz and b/web/ui/static/vendor/js/jquery.hotkeys.js.gz differ
diff --git a/web/ui/static/vendor/js/jquery.selection.js.gz b/web/ui/static/vendor/js/jquery.selection.js.gz
index d09cb7fc770..a6beebf5be8 100644
Binary files a/web/ui/static/vendor/js/jquery.selection.js.gz and b/web/ui/static/vendor/js/jquery.selection.js.gz differ
diff --git a/web/ui/static/vendor/js/popper.min.js.gz b/web/ui/static/vendor/js/popper.min.js.gz
index 9087b9366e1..bd2a47dbacf 100644
Binary files a/web/ui/static/vendor/js/popper.min.js.gz and b/web/ui/static/vendor/js/popper.min.js.gz differ
diff --git a/web/ui/static/vendor/rickshaw/rickshaw.min.css.gz b/web/ui/static/vendor/rickshaw/rickshaw.min.css.gz
index 467018ae03d..7c00a0a45d5 100644
Binary files a/web/ui/static/vendor/rickshaw/rickshaw.min.css.gz and b/web/ui/static/vendor/rickshaw/rickshaw.min.css.gz differ
diff --git a/web/ui/static/vendor/rickshaw/rickshaw.min.js.gz b/web/ui/static/vendor/rickshaw/rickshaw.min.js.gz
index 8401ea97fcf..b71f23dd63d 100644
Binary files a/web/ui/static/vendor/rickshaw/rickshaw.min.js.gz and b/web/ui/static/vendor/rickshaw/rickshaw.min.js.gz differ
diff --git a/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js.gz b/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js.gz
index 38f7cc0d2ae..2e3576cb046 100644
Binary files a/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js.gz and b/web/ui/static/vendor/rickshaw/vendor/d3.layout.min.js.gz differ
diff --git a/web/ui/static/vendor/rickshaw/vendor/d3.v3.js.gz b/web/ui/static/vendor/rickshaw/vendor/d3.v3.js.gz
index 0118bf07290..0b311f47a9b 100644
Binary files a/web/ui/static/vendor/rickshaw/vendor/d3.v3.js.gz and b/web/ui/static/vendor/rickshaw/vendor/d3.v3.js.gz differ