diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 1aae1fff9..7f7cec9cd 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,7 +1,7 @@
/web/ui @juliusv
/web/ui/module @juliusv @nexucis
-/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @gouthamve @jesusvazquez
+/storage/remote @cstyan @bwplotka @tomwilkie
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez
/discovery/kubernetes @brancz
/tsdb @jesusvazquez
/promql @roidelapluie
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 9617e04a4..3d56ff2b2 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -6,11 +6,11 @@ updates:
interval: "monthly"
groups:
k8s.io:
- patterns:
- - "k8s.io/*"
+ patterns:
+ - "k8s.io/*"
go.opentelemetry.io:
- patterns:
- - "go.opentelemetry.io/*"
+ patterns:
+ - "go.opentelemetry.io/*"
- package-ecosystem: "gomod"
directory: "/documentation/examples/remote_storage"
schedule:
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index 0f3c5d277..fe8c4704b 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -12,14 +12,14 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
+ - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
with:
input: 'prompb'
- - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
+ - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4
with:
input: 'prompb'
against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb'
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index f6d5c9191..2156e8f19 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -12,18 +12,18 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
+ - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
with:
input: 'prompb'
- - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
+ - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4
with:
input: 'prompb'
against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
- - uses: bufbuild/buf-push-action@342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 # v1.1.1
+ - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1
with:
input: 'prompb'
buf_token: ${{ secrets.BUF_TOKEN }}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8ba154e25..cead7abfd 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,34 +8,56 @@ jobs:
test_go:
name: Go tests
runs-on: ubuntu-latest
- # Whenever the Go version is updated here, .promu.yml
- # should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.21-base
+ # Whenever the Go version is updated here, .promu.yml
+ # should also be updated.
+ image: quay.io/prometheus/golang-builder:1.22-base
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- - run: go test ./tsdb/ -test.tsdb-isolation=false
- - run: go test --tags=stringlabels ./...
- - run: GOARCH=386 go test ./cmd/prometheus
+ - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+ - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
- run: make -C documentation/examples/remote_storage
- run: make -C documentation/examples
+
+ test_go_more:
+ name: More Go tests
+ runs-on: ubuntu-latest
+ container:
+ image: quay.io/prometheus/golang-builder:1.22-base
+ steps:
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+ - uses: ./.github/promci/actions/setup_environment
+ - run: go test --tags=dedupelabels ./...
+ - run: GOARCH=386 go test ./cmd/prometheus
- uses: ./.github/promci/actions/check_proto
with:
version: "3.15.8"
+ test_go_oldest:
+ name: Go tests with previous Go version
+ runs-on: ubuntu-latest
+ container:
+ # The go version in this image should be N-1 wrt test_go.
+ image: quay.io/prometheus/golang-builder:1.21-base
+ steps:
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - run: make build
+ # Don't run NPM build; don't run race-detector.
+ - run: make test GO_ONLY=1 test-flags=""
+
test_ui:
name: UI tests
runs-on: ubuntu-latest
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.21-base
+ image: quay.io/prometheus/golang-builder:1.22-base
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
with:
@@ -52,36 +74,24 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
- go-version: 1.21.x
+ go-version: 1.22.x
- run: |
$TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
go test $TestTargets -vet=off -v
shell: powershell
- test_golang_oldest:
- name: Go tests with previous Go version
- runs-on: ubuntu-latest
- # The go verson in this image should be N-1 wrt test_go.
- container:
- image: quay.io/prometheus/golang-builder:1.20-base
- steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- - run: make build
- - run: go test ./tsdb/...
- - run: go test ./tsdb/ -test.tsdb-isolation=false
-
test_mixins:
name: Mixins tests
runs-on: ubuntu-latest
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.20-base
+ image: quay.io/prometheus/golang-builder:1.22-base
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: go install ./cmd/promtool/.
- run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
- run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -104,7 +114,7 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
@@ -127,32 +137,45 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
parallelism: 12
thread: ${{ matrix.thread }}
+ check_generated_parser:
+ name: Check generated parser
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - name: Install Go
+ uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+ with:
+ cache: false
+ go-version: 1.22.x
+ - name: Run goyacc and check for diff
+ run: make install-goyacc check-generated-parser
golangci:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Install Go
- uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+ uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
cache: false
- go-version: 1.21.x
+ go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
- uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+ uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
- version: v1.55.2
+ version: v1.56.2
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@@ -162,10 +185,10 @@ jobs:
publish_main:
name: Publish main branch artifacts
runs-on: ubuntu-latest
- needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+ needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_main
with:
@@ -176,10 +199,10 @@ jobs:
publish_release:
name: Publish release artefacts
runs-on: ubuntu-latest
- needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+ needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_release
with:
@@ -194,14 +217,14 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- name: Install nodejs
- uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
+ uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
- - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+ - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 5e14936a9..561c22eab 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -20,22 +20,19 @@ jobs:
strategy:
fail-fast: false
matrix:
- language: ["go", "javascript"]
+ language: ["javascript"]
steps:
- name: Checkout repository
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
- with:
- go-version: 1.21.x
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Initialize CodeQL
- uses: github/codeql-action/init@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+ uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
with:
languages: ${{ matrix.language }}
- name: Autobuild
- uses: github/codeql-action/autobuild@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+ uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+ uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
diff --git a/.github/workflows/container_description.yml b/.github/workflows/container_description.yml
new file mode 100644
index 000000000..a7d7e150c
--- /dev/null
+++ b/.github/workflows/container_description.yml
@@ -0,0 +1,52 @@
+---
+name: Push README to Docker Hub
+on:
+ push:
+ paths:
+ - "README.md"
+ - ".github/workflows/container_description.yml"
+ branches: [ main, master ]
+
+permissions:
+ contents: read
+
+jobs:
+ PushDockerHubReadme:
+ runs-on: ubuntu-latest
+ name: Push README to Docker Hub
+ if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+ steps:
+ - name: git checkout
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - name: Set docker hub repo name
+ run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
+ - name: Push README to Dockerhub
+ uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
+ env:
+ DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
+ DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
+ with:
+ destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
+ provider: dockerhub
+ short_description: ${{ env.DOCKER_REPO_NAME }}
+ readme_file: 'README.md'
+
+ PushQuayIoReadme:
+ runs-on: ubuntu-latest
+ name: Push README to quay.io
+ if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+ steps:
+ - name: git checkout
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+ - name: Set quay.io org name
+ run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
+ - name: Set quay.io repo name
+ run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
+ - name: Push README to quay.io
+ uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
+ env:
+ DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
+ with:
+ destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
+ provider: quay
+ readme_file: 'README.md'
diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml
index 13f04f772..dc510e596 100644
--- a/.github/workflows/fuzzing.yml
+++ b/.github/workflows/fuzzing.yml
@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
- uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+ uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
diff --git a/.github/workflows/repo_sync.yml b/.github/workflows/repo_sync.yml
index 1cf2eee24..f1c7ca5d0 100644
--- a/.github/workflows/repo_sync.yml
+++ b/.github/workflows/repo_sync.yml
@@ -13,7 +13,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
- - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+ - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: ./scripts/sync_repo_files.sh
env:
GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index f71e1331b..0be780f30 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: "Checkout code"
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
+ uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # tag=v4.1.4
with:
persist-credentials: false
@@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
- uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # tag=v3.1.3
+ uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
with:
name: SARIF file
path: results.sarif
@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@407ffafae6a767df3e0230c3df91b6443ae8df75 # tag=v2.22.8
+ uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12
with:
sarif_file: results.sarif
diff --git a/.golangci.yml b/.golangci.yml
index 166b2e0d4..c63184877 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -21,11 +21,14 @@ linters:
- goimports
- misspell
- nolintlint
+ - perfsprint
- predeclared
- revive
- testifylint
- unconvert
- unused
+ - usestdlibvars
+ - whitespace
issues:
max-same-issues: 0
@@ -42,24 +45,32 @@ issues:
- linters:
- godot
source: "^// ==="
-
+ - linters:
+ - perfsprint
+ text: "fmt.Sprintf can be replaced with string addition"
linters-settings:
depguard:
rules:
main:
deny:
- - pkg: "sync/atomic"
- desc: "Use go.uber.org/atomic instead of sync/atomic"
- - pkg: "github.com/stretchr/testify/assert"
- desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
- - pkg: "github.com/go-kit/kit/log"
- desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
- - pkg: "io/ioutil"
- desc: "Use corresponding 'os' or 'io' functions instead."
- - pkg: "regexp"
- desc: "Use github.com/grafana/regexp instead of regexp"
- - pkg: "github.com/pkg/errors"
- desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
+ - pkg: "sync/atomic"
+ desc: "Use go.uber.org/atomic instead of sync/atomic"
+ - pkg: "github.com/stretchr/testify/assert"
+ desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+ - pkg: "github.com/go-kit/kit/log"
+ desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+ - pkg: "io/ioutil"
+ desc: "Use corresponding 'os' or 'io' functions instead."
+ - pkg: "regexp"
+ desc: "Use github.com/grafana/regexp instead of regexp"
+ - pkg: "github.com/pkg/errors"
+ desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
+ - pkg: "gzip"
+ desc: "Use github.com/klauspost/compress instead of gzip"
+ - pkg: "zlib"
+ desc: "Use github.com/klauspost/compress instead of zlib"
+ - pkg: "golang.org/x/exp/slices"
+ desc: "Use 'slices' instead."
errcheck:
exclude-functions:
# Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
@@ -77,6 +88,9 @@ linters-settings:
local-prefixes: github.com/prometheus/prometheus
gofumpt:
extra-rules: true
+ perfsprint:
+ # Optimizes `fmt.Errorf`.
+ errorf: false
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly set in configuration all required rules.
@@ -129,4 +143,3 @@ linters-settings:
- require-error
- suite-dont-use-pkg
- suite-extra-assert-call
-
diff --git a/.promu.yml b/.promu.yml
index e5e01181c..0aa51d6d3 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here,
- # .circle/config.yml should also be updated.
- version: 1.21
+ # .github/workflows should also be updated.
+ version: 1.22
repository:
path: github.com/prometheus/prometheus
build:
diff --git a/.yamllint b/.yamllint
index 955a5a627..1859cb624 100644
--- a/.yamllint
+++ b/.yamllint
@@ -1,5 +1,7 @@
---
extends: default
+ignore: |
+ ui/react-app/node_modules
rules:
braces:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 71b8c97fe..079dd7595 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,8 +2,164 @@
## unreleased
-* [ENHANCEMENT] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
-* [BUGFIX] Agent: Participate in notify calls. #13223
+* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980
+* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
+* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
+* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
+* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991
+* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991
+
+## 2.52.0-rc.1 / 2024-05-03
+
+* [BUGFIX] API: Fix missing comma during JSON encoding of API results. #14047
+
+## 2.52.0-rc.0 / 2024-04-22
+
+* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
+* [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
+* [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935
+* [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099
+* [FEATURE] Alerting: Support native histogram templating. #13731
+* [FEATURE] Linode SD: Support IPv6 range discovery and region filtering. #13774
+* [ENHANCEMENT] PromQL: Performance improvements for queries with regex matchers. #13461
+* [ENHANCEMENT] PromQL: Performance improvements when using aggregation operators. #13744
+* [ENHANCEMENT] PromQL: Validate label_join destination label. #13803
+* [ENHANCEMENT] Scrape: Increment `prometheus_target_scrapes_sample_duplicate_timestamp_total` metric on duplicated series during one scrape. #12933
+* [ENHANCEMENT] TSDB: Many improvements in performance. #13742 #13673 #13782
+* [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
+* [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
+* [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
+* [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
+* [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
+* [BUGFIX] PromQL: Fix possible duplicated label name and values in a metric result for specific queries. #13845
+* [BUGFIX] Scrape: Fix setting native histogram schema factor during scrape. #13846
+* [BUGFIX] TSDB: Fix counting of histogram samples when creating WAL checkpoint stats. #13776
+* [BUGFIX] TSDB: Fix cases of compacting empty heads. #13755
+* [BUGFIX] TSDB: Count float histograms in WAL checkpoint. #13844
+* [BUGFIX] Remote Read: Fix memory leak due to broken requests. #13777
+* [BUGFIX] API: Stop building response for `/api/v1/series/` when the API request was cancelled. #13766
+* [BUGFIX] promtool: Fix panic on `promtool tsdb analyze --extended` when no native histograms are present. #13976
+
+## 2.51.2 / 2024-04-09
+
+Bugfix release.
+
+* [BUGFIX] Notifier: could hang when using relabeling on alerts #13861
+
+## 2.51.1 / 2024-03-27
+
+Bugfix release.
+
+* [BUGFIX] PromQL: Re-instate validation of label_join destination label #13803
+* [BUGFIX] Scraping (experimental native histograms): Fix handling of the min bucket factor on sync of targets #13846
+* [BUGFIX] PromQL: Some queries could return the same series twice (library use only) #13845
+
+## 2.51.0 / 2024-03-18
+
+This version is built with Go 1.22.1.
+
+There is a new optional build tag "dedupelabels", which should reduce memory consumption (#12304).
+It is off by default; there will be an optional alternative image to try it out.
+
+* [CHANGE] Scraping: Do experimental timestamp alignment even if tolerance is bigger than 1% of scrape interval #13624, #13737
+* [FEATURE] Alerting: Relabel rules for AlertManagerConfig; allows routing alerts to different alertmanagers #12551, #13735
+* [FEATURE] API: add limit param to series, label-names and label-values APIs #13396
+* [FEATURE] UI (experimental native histograms): Add native histogram chart to Table view #13658
+* [FEATURE] Promtool: Add a "tsdb dump-openmetrics" to dump in OpenMetrics format. #13194
+* [FEATURE] PromQL (experimental native histograms): Add histogram_avg function #13467
+* [ENHANCEMENT] Rules: Evaluate independent rules concurrently #12946, #13527
+* [ENHANCEMENT] Scraping (experimental native histograms): Support exemplars #13488
+* [ENHANCEMENT] Remote Write: Disable resharding during active retry backoffs #13562
+* [ENHANCEMENT] Observability: Add native histograms to latency/duration metrics #13681
+* [ENHANCEMENT] Observability: Add 'type' label to prometheus_tsdb_head_out_of_order_samples_appended_total #13607
+* [ENHANCEMENT] API: Faster generation of targets into JSON #13469, #13484
+* [ENHANCEMENT] Scraping, API: Use faster compression library #10782
+* [ENHANCEMENT] OpenTelemetry: Performance improvements in OTLP parsing #13627
+* [ENHANCEMENT] PromQL: Optimisations to reduce CPU and memory #13448, #13536
+* [BUGFIX] PromQL: Constrain extrapolation in rate() to half of sample interval #13725
+* [BUGFIX] Remote Write: Stop slowing down when a new WAL segment is created #13583, #13628
+* [BUGFIX] PromQL: Fix wrongly scoped range vectors with @ modifier #13559
+* [BUGFIX] Kubernetes SD: Pod status changes were not discovered by Endpoints service discovery #13337
+* [BUGFIX] Azure SD: Fix 'error: parameter virtualMachineScaleSetName cannot be empty' (#13702)
+* [BUGFIX] Remote Write: Fix signing for AWS sigv4 transport #13497
+* [BUGFIX] Observability: Exemplars emitted by Prometheus use "trace_id" not "traceID" #13589
+
+## 2.50.1 / 2024-02-26
+
+* [BUGFIX] API: Fix metadata API using wrong field names. #13633
+
+## 2.50.0 / 2024-02-22
+
+* [CHANGE] Remote Write: Error `storage.ErrTooOldSample` is now generating HTTP error 400 instead of HTTP error 500. #13335
+* [FEATURE] Remote Write: Drop old inmemory samples. Activated using the config entry `sample_age_limit`. #13002
+* [FEATURE] **Experimental**: Add support for ingesting zeros as created timestamps. (enabled under the feature-flag `created-timestamp-zero-ingestion`). #12733 #13279
+* [FEATURE] Promtool: Add `analyze` histograms command. #12331
+* [FEATURE] TSDB/compaction: Add a way to enable overlapping compaction. #13282 #13393 #13398
+* [FEATURE] Add automatic memory limit handling. Activated using the feature flag `auto-gomemlimit`. #13395
+* [ENHANCEMENT] Promtool: allow specifying multiple matchers in `promtool tsdb dump`. #13296
+* [ENHANCEMENT] PromQL: Restore more efficient version of `NewPossibleNonCounterInfo` annotation. #13022
+* [ENHANCEMENT] Kuma SD: Extend configuration to allow users to specify client ID. #13278
+* [ENHANCEMENT] PromQL: Use natural sort in `sort_by_label` and `sort_by_label_desc`. This is **experimental**. #13411
+* [ENHANCEMENT] Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config. #13222
+* [ENHANCEMENT] Native Histograms: Issue warning if histogramRate is applied to the wrong kind of histogram. #13392
+* [ENHANCEMENT] TSDB: Make transaction isolation data structures smaller. #13015
+* [ENHANCEMENT] TSDB/postings: Optimize merge using Loser Tree. #12878
+* [ENHANCEMENT] TSDB: Simplify internal series delete function. #13261
+* [ENHANCEMENT] Agent: Performance improvement by making the global hash lookup table smaller. #13262
+* [ENHANCEMENT] PromQL: faster execution of metric functions, e.g. abs(), rate() #13446
+* [ENHANCEMENT] TSDB: Optimize label values with matchers by taking shortcuts. #13426
+* [ENHANCEMENT] Kubernetes SD: Check preconditions earlier and avoid unnecessary checks or iterations in kube_sd. #13408
+* [ENHANCEMENT] Promtool: Improve visibility for `promtool test rules` with JSON colored formatting. #13342
+* [ENHANCEMENT] Consoles: Exclude iowait and steal from CPU Utilisation. #9593
+* [ENHANCEMENT] Various improvements and optimizations on Native Histograms. #13267, #13215, #13276 #13289, #13340
+* [BUGFIX] Scraping: Fix quality value in HTTP Accept header. #13313
+* [BUGFIX] UI: Fix usage of the function `time()` that was crashing. #13371
+* [BUGFIX] Azure SD: Fix SD crashing when it finds a VM scale set. #13578
+
+## 2.49.1 / 2024-01-15
+
+* [BUGFIX] TSDB: Fixed a wrong `q=` value in scrape accept header #13313
+
+## 2.49.0 / 2024-01-15
+
+* [FEATURE] Promtool: Add `--run` flag to the `promtool test rules` command. #12206
+* [FEATURE] SD: Add support for `NS` records to DNS SD. #13219
+* [FEATURE] UI: Add heatmap visualization setting in the Graph tab, useful for histograms. #13096 #13371
+* [FEATURE] Scraping: Add `scrape_config.enable_compression` (default true) to disable gzip compression when scraping the target. #13166
+* [FEATURE] PromQL: Add a `promql-experimental-functions` feature flag containing some new experimental PromQL functions. #13103 NOTE: More experimental functions might be added behind the same feature flag in the future. Added functions:
+ * Experimental `mad_over_time` (median absolute deviation around the median) function. #13059
+ * Experimental `sort_by_label` and `sort_by_label_desc` functions allowing sorting returned series by labels. #11299
+* [FEATURE] SD: Add `__meta_linode_gpus` label to Linode SD. #13097
+* [FEATURE] API: Add `exclude_alerts` query parameter to `/api/v1/rules` to only return recording rules. #12999
+* [FEATURE] TSDB: --storage.tsdb.retention.time flag value is now exposed as a `prometheus_tsdb_retention_limit_seconds` metric. #12986
+* [FEATURE] Scraping: Add ability to specify priority of scrape protocols to accept during scrape (e.g. to scrape Prometheus proto format for certain jobs). This can be changed by setting `global.scrape_protocols` and `scrape_config.scrape_protocols`. #12738
+* [ENHANCEMENT] Scraping: Automated handling of scraping histograms that violate `scrape_config.native_histogram_bucket_limit` setting. #13129
+* [ENHANCEMENT] Scraping: Optimized memory allocations when scraping. #12992
+* [ENHANCEMENT] SD: Added cache for Azure SD to avoid rate-limits. #12622
+* [ENHANCEMENT] TSDB: Various improvements to OOO exemplar scraping. E.g. allowing ingestion of exemplars with the same timestamp, but with different labels. #13021
+* [ENHANCEMENT] API: Optimize `/api/v1/labels` and `/api/v1/label/<label_name>/values` when 1 set of matchers are used. #12888
+* [ENHANCEMENT] TSDB: Various optimizations for TSDB block index, head mmap chunks and WAL, reducing latency and memory allocations (improving API calls, compaction queries etc). #12997 #13058 #13056 #13040
+* [ENHANCEMENT] PromQL: Optimize memory allocations and latency when querying float histograms. #12954
+* [ENHANCEMENT] Rules: Instrument TraceID in log lines for rule evaluations. #13034
+* [ENHANCEMENT] PromQL: Optimize memory allocations in query_range calls. #13043
+* [ENHANCEMENT] Promtool: unittest interval now defaults to evaluation_intervals when not set. #12729
+* [BUGFIX] SD: Fixed Azure SD public IP reporting #13241
+* [BUGFIX] API: Fix inaccuracies in posting cardinality statistics. #12653
+* [BUGFIX] PromQL: Fix inaccuracies of `histogram_quantile` with classic histograms. #13153
+* [BUGFIX] TSDB: Fix rare fails or inaccurate queries with OOO samples. #13115
+* [BUGFIX] TSDB: Fix rare panics on append commit when exemplars are used. #13092
+* [BUGFIX] TSDB: Fix exemplar WAL storage, so remote write can send/receive samples before exemplars. #13113
+* [BUGFIX] Mixins: Fix `url` filter on remote write dashboards. #10721
+* [BUGFIX] PromQL/TSDB: Various fixes to float histogram operations. #12891 #12977 #12609 #13190 #13189 #13191 #13201 #13212 #13208
+* [BUGFIX] Promtool: Fix int32 overflow issues for 32-bit architectures. #12978
+* [BUGFIX] SD: Fix Azure VM Scale Set NIC issue. #13283
+
+## 2.48.1 / 2023-12-07
+
+* [BUGFIX] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
+* [BUGFIX] Agent: Participate in notify calls (fixes slow down in remote write handling introduced in 2.45). #13223
## 2.48.0 / 2023-11-16
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f666caae9..9b1b286cc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -95,7 +95,7 @@ can modify the `./promql/parser/generated_parser.y.go` manually.
```golang
// As of writing this was somewhere around line 600.
var (
- yyDebug = 0 // This can be be a number 0 -> 5.
+ yyDebug = 0 // This can be a number 0 -> 5.
yyErrorVerbose = false // This can be set to true.
)
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 902e9a6e9..d9a7d0f78 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -1,15 +1,23 @@
# Maintainers
-Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:
+General maintainers:
+* Bryan Boreham (bjboreham@gmail.com / @bboreham)
+* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
+* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
+* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
+Maintainers for specific parts of the codebase:
* `cmd`
* `promtool`: David Leadbeater ( / @dgl)
* `discovery`
* `k8s`: Frederic Branczyk ( / @brancz)
* `documentation`
* `prometheus-mixin`: Matthias Loibl ( / @metalmatze)
+* `model/histogram` and other code related to native histograms: Björn Rabenstein ( / @beorn7),
+George Krajcsovits ( / @krajorama)
* `storage`
- * `remote`: Chris Marchbanks ( / @csmarchbanks), Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie)
+ * `remote`: Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie)
+ * `otlptranslator`: Arve Knudsen ( / @aknuds1), Jesús Vázquez ( / @jesusvazquez)
* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez)
* `agent`: Robert Fratto ( / @rfratto)
* `web`
@@ -22,3 +30,13 @@ size of this repository, the natural changes in focus of maintainers over time,
and nuances of where particular features live, this list will always be
incomplete and out of date. However the listed maintainer(s) should be able to
direct a PR/question to the right person.
+
+v3 release coordinators:
+* Alex Greenbank ( / @alexgreenbank)
+* Carrie Edwards ( / @carrieedwards)
+* Fiona Liao ( / @fionaliao)
+* Jan Fajerski ( / @jan--f)
+* Jesús Vázquez ( / @jesusvazquez)
+* Nico Pazos ( / @npazosmendez)
+* Owen Williams ( / @ywwg)
+* Tom Braack ( / @sh0rez)
diff --git a/Makefile b/Makefile
index ab229f931..5dcebfd1a 100644
--- a/Makefile
+++ b/Makefile
@@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
GOLANGCI_LINT_OPTS ?= --timeout 4m
+GOYACC_VERSION ?= v0.6.0
include Makefile.common
@@ -78,24 +79,42 @@ assets-tarball: assets
@echo '>> packaging assets'
scripts/package_assets.sh
-# We only want to generate the parser when there's changes to the grammar.
.PHONY: parser
parser:
@echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell command -v goyacc > /dev/null))
+ifeq (, $(shell command -v goyacc 2> /dev/null))
@echo "goyacc not installed so skipping"
- @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
+ @echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\""
else
- goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+ $(MAKE) promql/parser/generated_parser.y.go
endif
+promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
+ @echo ">> running goyacc to generate the .go file."
+ @goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+
+.PHONY: clean-parser
+clean-parser:
+ @echo ">> cleaning generated parser"
+ @rm -f promql/parser/generated_parser.y.go
+
+.PHONY: check-generated-parser
+check-generated-parser: clean-parser promql/parser/generated_parser.y.go
+ @echo ">> checking generated parser"
+ @git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false)
+
+.PHONY: install-goyacc
+install-goyacc:
+ @echo ">> installing goyacc $(GOYACC_VERSION)"
+ @go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)
+
.PHONY: test
# If we only want to only test go code we have to change the test target
# which is called by all.
ifeq ($(GO_ONLY),1)
test: common-test check-go-mod-version
else
-test: common-test ui-build-module ui-test ui-lint check-go-mod-version
+test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
endif
.PHONY: npm_licenses
diff --git a/Makefile.common b/Makefile.common
index 8f3630921..0e9ace29b 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -49,20 +49,20 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
-PROMU_VERSION ?= 0.15.0
+PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.55.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64 and arm64.
+GOLANGCI_LINT_VERSION ?= v1.56.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
@@ -169,16 +169,20 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
- $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint fix"
+ $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -204,6 +208,10 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+ @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
diff --git a/README.md b/README.md
index 5fa6cc49e..023619a78 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ examples and guides.
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
@@ -149,7 +149,7 @@ We are publishing our Remote Write protobuf independently at
You can use that as a library:
```shell
-go get go.buf.build/protocolbuffers/go/prometheus/prometheus
+go get buf.build/gen/go/prometheus/prometheus/protocolbuffers/go@latest
```
This is experimental.
diff --git a/RELEASE.md b/RELEASE.md
index 6ab2f6389..f313c4172 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -54,7 +54,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
| v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) |
| v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) |
-| v2.50 | 2024-01-16 | **searching for volunteer** |
+| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
+| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
+| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
+| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
diff --git a/VERSION b/VERSION
index 9a9feb084..867c356bb 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.48.0
+2.52.0-rc.1
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index a8bd2f2b3..e250a95c8 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -33,6 +33,7 @@ import (
"syscall"
"time"
+ "github.com/KimMachineGun/automemlimit/memlimit"
"github.com/alecthomas/kingpin/v2"
"github.com/alecthomas/units"
"github.com/go-kit/log"
@@ -41,6 +42,7 @@ import (
"github.com/mwitkow/go-conntrack"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
+ versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
@@ -98,7 +100,7 @@ var (
)
func init() {
- prometheus.MustRegister(version.NewCollector(strings.ReplaceAll(appName, "-", "_")))
+ prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_")))
var err error
defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString)
@@ -136,6 +138,7 @@ type flagConfig struct {
forGracePeriod model.Duration
outageTolerance model.Duration
resendDelay model.Duration
+ maxConcurrentEvals int64
web web.Options
scrape scrape.Options
tsdb tsdbOptions
@@ -147,13 +150,16 @@ type flagConfig struct {
queryMaxSamples int
RemoteFlushDeadline model.Duration
- featureList []string
+ featureList []string
+ memlimitRatio float64
// These options are extracted from featureList
// for ease of use.
enableExpandExternalLabels bool
enableNewSDManager bool
enablePerStepStats bool
enableAutoGOMAXPROCS bool
+ enableAutoGOMEMLIMIT bool
+ enableConcurrentRuleEval bool
prometheusURL string
corsRegexString string
@@ -197,6 +203,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "auto-gomaxprocs":
c.enableAutoGOMAXPROCS = true
level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+ case "auto-gomemlimit":
+ c.enableAutoGOMEMLIMIT = true
+ level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
+ case "concurrent-rule-eval":
+ c.enableConcurrentRuleEval = true
+ level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
case "no-default-scrape-port":
c.scrape.NoDefaultPort = true
level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
@@ -205,6 +217,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
+ c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -262,6 +275,9 @@ func main() {
a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
+ a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
+ Default("0.9").FloatVar(&cfg.memlimitRatio)
+
webConfig := a.Flag(
"web.config.file",
"[EXPERIMENTAL] Path to configuration file that can enable TLS or authentication.",
@@ -402,6 +418,9 @@ func main() {
serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
+ serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
+ Default("4").Int64Var(&cfg.maxConcurrentEvals)
+
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)
@@ -429,7 +448,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
- a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+ a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -467,6 +486,11 @@ func main() {
os.Exit(3)
}
+ if cfg.memlimitRatio <= 0.0 || cfg.memlimitRatio > 1.0 {
+ fmt.Fprintf(os.Stderr, "--auto-gomemlimit.ratio must be greater than 0 and less than or equal to 1.")
+ os.Exit(1)
+ }
+
localStoragePath := cfg.serverStoragePath
if agentMode {
localStoragePath = cfg.agentStoragePath
@@ -630,9 +654,16 @@ func main() {
level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
os.Exit(1)
}
+
+ sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
+ if err != nil {
+ level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
+ os.Exit(1)
+ }
+
if cfg.enableNewSDManager {
{
- discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discovery.Name("scrape"))
+ discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1)
@@ -641,7 +672,7 @@ func main() {
}
{
- discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discovery.Name("notify"))
+ discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1)
@@ -650,7 +681,7 @@ func main() {
}
} else {
{
- discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, legacymanager.Name("scrape"))
+ discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1)
@@ -659,7 +690,7 @@ func main() {
}
{
- discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, legacymanager.Name("notify"))
+ discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify"))
if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1)
@@ -695,6 +726,20 @@ func main() {
}
}
+ if cfg.enableAutoGOMEMLIMIT {
+ if _, err := memlimit.SetGoMemLimitWithOpts(
+ memlimit.WithRatio(cfg.memlimitRatio),
+ memlimit.WithProvider(
+ memlimit.ApplyFallback(
+ memlimit.FromCgroup,
+ memlimit.FromSystem,
+ ),
+ ),
+ ); err != nil {
+ level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
+ }
+ }
+
if !agentMode {
opts := promql.EngineOpts{
Logger: log.With(logger, "component", "query engine"),
@@ -714,17 +759,19 @@ func main() {
queryEngine = promql.NewEngine(opts)
ruleManager = rules.NewManager(&rules.ManagerOptions{
- Appendable: fanoutStorage,
- Queryable: localStorage,
- QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage),
- NotifyFunc: rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
- Context: ctxRule,
- ExternalURL: cfg.web.ExternalURL,
- Registerer: prometheus.DefaultRegisterer,
- Logger: log.With(logger, "component", "rule manager"),
- OutageTolerance: time.Duration(cfg.outageTolerance),
- ForGracePeriod: time.Duration(cfg.forGracePeriod),
- ResendDelay: time.Duration(cfg.resendDelay),
+ Appendable: fanoutStorage,
+ Queryable: localStorage,
+ QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage),
+ NotifyFunc: rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
+ Context: ctxRule,
+ ExternalURL: cfg.web.ExternalURL,
+ Registerer: prometheus.DefaultRegisterer,
+ Logger: log.With(logger, "component", "rule manager"),
+ OutageTolerance: time.Duration(cfg.outageTolerance),
+ ForGracePeriod: time.Duration(cfg.forGracePeriod),
+ ResendDelay: time.Duration(cfg.resendDelay),
+ MaxConcurrentEvals: cfg.maxConcurrentEvals,
+ ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
})
}
@@ -914,8 +961,8 @@ func main() {
func() error {
// Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
select {
- case <-term:
- level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
+ case sig := <-term:
+ level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String())
reloadReady.Close()
case <-webHandler.Quit():
level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
@@ -1646,6 +1693,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
+ EnableOverlappingCompaction: true,
}
}
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index f4fe3855c..89c171bb5 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -24,6 +24,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"syscall"
"testing"
@@ -126,12 +127,9 @@ func TestFailedStartupExitCode(t *testing.T) {
require.Error(t, err)
var exitError *exec.ExitError
- if errors.As(err, &exitError) {
- status := exitError.Sys().(syscall.WaitStatus)
- require.Equal(t, expectedExitStatus, status.ExitStatus())
- } else {
- t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
- }
+ require.ErrorAs(t, err, &exitError)
+ status := exitError.Sys().(syscall.WaitStatus)
+ require.Equal(t, expectedExitStatus, status.ExitStatus())
}
type senderFunc func(alerts ...*notifier.Alert)
@@ -192,11 +190,9 @@ func TestSendAlerts(t *testing.T) {
for i, tc := range testCases {
tc := tc
- t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
- if len(tc.in) == 0 {
- t.Fatalf("sender called with 0 alert")
- }
+ require.NotEmpty(t, tc.in, "sender called with 0 alert")
require.Equal(t, tc.exp, alerts)
})
rules.SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
@@ -228,7 +224,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
- t.Errorf("prometheus should be still running: %v", err)
+ require.Fail(t, "prometheus should be still running: %v", err)
case <-time.After(startupTime):
prom.Process.Kill()
<-done
@@ -239,12 +235,9 @@ func TestWALSegmentSizeBounds(t *testing.T) {
err = prom.Wait()
require.Error(t, err)
var exitError *exec.ExitError
- if errors.As(err, &exitError) {
- status := exitError.Sys().(syscall.WaitStatus)
- require.Equal(t, expectedExitStatus, status.ExitStatus())
- } else {
- t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
- }
+ require.ErrorAs(t, err, &exitError)
+ status := exitError.Sys().(syscall.WaitStatus)
+ require.Equal(t, expectedExitStatus, status.ExitStatus())
}
}
@@ -274,7 +267,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }()
select {
case err := <-done:
- t.Errorf("prometheus should be still running: %v", err)
+ require.Fail(t, "prometheus should be still running: %v", err)
case <-time.After(startupTime):
prom.Process.Kill()
<-done
@@ -285,12 +278,9 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
err = prom.Wait()
require.Error(t, err)
var exitError *exec.ExitError
- if errors.As(err, &exitError) {
- status := exitError.Sys().(syscall.WaitStatus)
- require.Equal(t, expectedExitStatus, status.ExitStatus())
- } else {
- t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
- }
+ require.ErrorAs(t, err, &exitError)
+ status := exitError.Sys().(syscall.WaitStatus)
+ require.Equal(t, expectedExitStatus, status.ExitStatus())
}
}
@@ -347,10 +337,8 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
}
require.Len(t, g.GetMetric(), 1)
- if _, ok := res[m]; ok {
- t.Error("expected only one metric family for", m)
- t.FailNow()
- }
+ _, ok := res[m]
+ require.False(t, ok, "expected only one metric family for", m)
res[m] = *g.GetMetric()[0].GetGauge().Value
}
}
diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go
index 7224e25d7..2011fb123 100644
--- a/cmd/prometheus/main_unix_test.go
+++ b/cmd/prometheus/main_unix_test.go
@@ -12,7 +12,6 @@
// limitations under the License.
//
//go:build !windows
-// +build !windows
package main
@@ -24,6 +23,8 @@ import (
"testing"
"time"
+ "github.com/stretchr/testify/require"
+
"github.com/prometheus/prometheus/util/testutil"
)
@@ -38,9 +39,7 @@ func TestStartupInterrupt(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port)
err := prom.Start()
- if err != nil {
- t.Fatalf("execution error: %v", err)
- }
+ require.NoError(t, err)
done := make(chan error, 1)
go func() {
@@ -69,14 +68,11 @@ Loop:
time.Sleep(500 * time.Millisecond)
}
- if !startedOk {
- t.Fatal("prometheus didn't start in the specified timeout")
- }
- switch err := prom.Process.Kill(); {
- case err == nil:
- t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
- case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
- // TODO: find a better way to detect when the process didn't exit as expected!
- t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
+ require.True(t, startedOk, "prometheus didn't start in the specified timeout")
+ err = prom.Process.Kill()
+ require.Error(t, err, "prometheus didn't shutdown gracefully after sending the Interrupt signal")
+ // TODO - find a better way to detect when the process didn't exit as expected!
+ if stoppedErr != nil {
+ require.EqualError(t, stoppedErr, "signal: interrupt", "prometheus exit")
}
}
diff --git a/cmd/promtool/analyze.go b/cmd/promtool/analyze.go
new file mode 100644
index 000000000..c1f523de5
--- /dev/null
+++ b/cmd/promtool/analyze.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ v1 "github.com/prometheus/client_golang/api/prometheus/v1"
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+var (
+ errNotNativeHistogram = fmt.Errorf("not a native histogram")
+ errNotEnoughData = fmt.Errorf("not enough data")
+
+ outputHeader = `Bucket stats for each histogram series over time
+------------------------------------------------
+First the min, avg, and max number of populated buckets, followed by the total
+number of buckets (only if different from the max number of populated buckets
+which is typical for classic but not native histograms).`
+ outputFooter = `Aggregated bucket stats
+-----------------------
+Each line shows min/avg/max over the series above.`
+)
+
+type QueryAnalyzeConfig struct {
+ metricType string
+ duration time.Duration
+ time string
+ matchers []string
+}
+
+// run retrieves metrics that look like conventional histograms (i.e. have _bucket
+// suffixes) or native histograms, depending on metricType flag.
+func (c *QueryAnalyzeConfig) run(url *url.URL, roundtripper http.RoundTripper) error {
+ if c.metricType != "histogram" {
+ return fmt.Errorf("analyze type is %s, must be 'histogram'", c.metricType)
+ }
+
+ ctx := context.Background()
+
+ api, err := newAPI(url, roundtripper, nil)
+ if err != nil {
+ return err
+ }
+
+ var endTime time.Time
+ if c.time != "" {
+ endTime, err = parseTime(c.time)
+ if err != nil {
+ return fmt.Errorf("error parsing time '%s': %w", c.time, err)
+ }
+ } else {
+ endTime = time.Now()
+ }
+
+ return c.getStatsFromMetrics(ctx, api, endTime, os.Stdout, c.matchers)
+}
+
+func (c *QueryAnalyzeConfig) getStatsFromMetrics(ctx context.Context, api v1.API, endTime time.Time, out io.Writer, matchers []string) error {
+ fmt.Fprintf(out, "%s\n\n", outputHeader)
+ metastatsNative := newMetaStatistics()
+ metastatsClassic := newMetaStatistics()
+ for _, matcher := range matchers {
+ seriesSel := seriesSelector(matcher, c.duration)
+ matrix, err := querySamples(ctx, api, seriesSel, endTime)
+ if err != nil {
+ return err
+ }
+
+ matrices := make(map[string]model.Matrix)
+ for _, series := range matrix {
+ // We do not handle mixed types. If there are float values, we assume it is a
+ // classic histogram, otherwise we assume it is a native histogram, and we
+ // ignore series with errors if they do not match the expected type.
+ if len(series.Values) == 0 {
+ stats, err := calcNativeBucketStatistics(series)
+ if err != nil {
+ if errors.Is(err, errNotNativeHistogram) || errors.Is(err, errNotEnoughData) {
+ continue
+ }
+ return err
+ }
+ fmt.Fprintf(out, "- %s (native): %v\n", series.Metric, *stats)
+ metastatsNative.update(stats)
+ } else {
+ lbs := model.LabelSet(series.Metric).Clone()
+ if _, ok := lbs["le"]; !ok {
+ continue
+ }
+ metricName := string(lbs[labels.MetricName])
+ if !strings.HasSuffix(metricName, "_bucket") {
+ continue
+ }
+ delete(lbs, labels.MetricName)
+ delete(lbs, "le")
+ key := formatSeriesName(metricName, lbs)
+ matrices[key] = append(matrices[key], series)
+ }
+ }
+
+ for key, matrix := range matrices {
+ stats, err := calcClassicBucketStatistics(matrix)
+ if err != nil {
+ if errors.Is(err, errNotEnoughData) {
+ continue
+ }
+ return err
+ }
+ fmt.Fprintf(out, "- %s (classic): %v\n", key, *stats)
+ metastatsClassic.update(stats)
+ }
+ }
+ fmt.Fprintf(out, "\n%s\n", outputFooter)
+ if metastatsNative.Count() > 0 {
+ fmt.Fprintf(out, "\nNative %s\n", metastatsNative)
+ }
+ if metastatsClassic.Count() > 0 {
+ fmt.Fprintf(out, "\nClassic %s\n", metastatsClassic)
+ }
+ return nil
+}
+
+func seriesSelector(metricName string, duration time.Duration) string {
+ builder := strings.Builder{}
+ builder.WriteString(metricName)
+ builder.WriteRune('[')
+ builder.WriteString(duration.String())
+ builder.WriteRune(']')
+ return builder.String()
+}
+
+func formatSeriesName(metricName string, lbs model.LabelSet) string {
+ builder := strings.Builder{}
+ builder.WriteString(metricName)
+ builder.WriteString(lbs.String())
+ return builder.String()
+}
+
+func querySamples(ctx context.Context, api v1.API, query string, end time.Time) (model.Matrix, error) {
+ values, _, err := api.Query(ctx, query, end)
+ if err != nil {
+ return nil, err
+ }
+
+ matrix, ok := values.(model.Matrix)
+ if !ok {
+ return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
+ }
+
+ return matrix, nil
+}
+
+// minPop/avgPop/maxPop is for the number of populated (non-zero) buckets.
+// total is the total number of buckets across all samples in the series,
+// populated or not.
+type statistics struct {
+ minPop, maxPop, total int
+ avgPop float64
+}
+
+func (s statistics) String() string {
+ if s.maxPop == s.total {
+ return fmt.Sprintf("%d/%.3f/%d", s.minPop, s.avgPop, s.maxPop)
+ }
+ return fmt.Sprintf("%d/%.3f/%d/%d", s.minPop, s.avgPop, s.maxPop, s.total)
+}
+
+func calcClassicBucketStatistics(matrix model.Matrix) (*statistics, error) {
+ numBuckets := len(matrix)
+
+ stats := &statistics{
+ minPop: math.MaxInt,
+ total: numBuckets,
+ }
+
+ if numBuckets == 0 || len(matrix[0].Values) < 2 {
+ return stats, errNotEnoughData
+ }
+
+ numSamples := len(matrix[0].Values)
+
+ sortMatrix(matrix)
+
+ totalPop := 0
+ for timeIdx := 0; timeIdx < numSamples; timeIdx++ {
+ curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx)
+ if err != nil {
+ return stats, err
+ }
+ countPop := 0
+ for _, b := range curr {
+ if b != 0 {
+ countPop++
+ }
+ }
+
+ totalPop += countPop
+ if stats.minPop > countPop {
+ stats.minPop = countPop
+ }
+ if stats.maxPop < countPop {
+ stats.maxPop = countPop
+ }
+ }
+ stats.avgPop = float64(totalPop) / float64(numSamples)
+ return stats, nil
+}
+
+func sortMatrix(matrix model.Matrix) {
+ sort.SliceStable(matrix, func(i, j int) bool {
+ return getLe(matrix[i]) < getLe(matrix[j])
+ })
+}
+
+func getLe(series *model.SampleStream) float64 {
+ lbs := model.LabelSet(series.Metric)
+ le, _ := strconv.ParseFloat(string(lbs["le"]), 64)
+ return le
+}
+
+func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int, error) {
+ counts := make([]int, numBuckets)
+ if timeIdx >= len(matrix[0].Values) {
+ // Just return zeroes instead of erroring out so we can get partial results.
+ return counts, nil
+ }
+ counts[0] = int(matrix[0].Values[timeIdx].Value)
+ for i, bucket := range matrix[1:] {
+ if timeIdx >= len(bucket.Values) {
+ // Just return zeroes instead of erroring out so we can get partial results.
+ return counts, nil
+ }
+ curr := bucket.Values[timeIdx]
+ prev := matrix[i].Values[timeIdx]
+ // Assume the results are nicely aligned.
+ if curr.Timestamp != prev.Timestamp {
+ return counts, fmt.Errorf("matrix result is not time aligned")
+ }
+ counts[i+1] = int(curr.Value - prev.Value)
+ }
+ return counts, nil
+}
+
+type bucketBounds struct {
+ boundaries int32
+ upper, lower float64
+}
+
+func makeBucketBounds(b *model.HistogramBucket) bucketBounds {
+ return bucketBounds{
+ boundaries: b.Boundaries,
+ upper: float64(b.Upper),
+ lower: float64(b.Lower),
+ }
+}
+
+func calcNativeBucketStatistics(series *model.SampleStream) (*statistics, error) {
+ stats := &statistics{
+ minPop: math.MaxInt,
+ }
+
+ overall := make(map[bucketBounds]struct{})
+ totalPop := 0
+ if len(series.Histograms) == 0 {
+ return nil, errNotNativeHistogram
+ }
+ if len(series.Histograms) == 1 {
+ return nil, errNotEnoughData
+ }
+ for _, histogram := range series.Histograms {
+ for _, bucket := range histogram.Histogram.Buckets {
+ bb := makeBucketBounds(bucket)
+ overall[bb] = struct{}{}
+ }
+ countPop := len(histogram.Histogram.Buckets)
+
+ totalPop += countPop
+ if stats.minPop > countPop {
+ stats.minPop = countPop
+ }
+ if stats.maxPop < countPop {
+ stats.maxPop = countPop
+ }
+ }
+ stats.avgPop = float64(totalPop) / float64(len(series.Histograms))
+ stats.total = len(overall)
+ return stats, nil
+}
+
+type distribution struct {
+ min, max, count int
+ avg float64
+}
+
+func newDistribution() distribution {
+ return distribution{
+ min: math.MaxInt,
+ }
+}
+
+func (d *distribution) update(num int) {
+ if d.min > num {
+ d.min = num
+ }
+ if d.max < num {
+ d.max = num
+ }
+ d.count++
+ d.avg += float64(num)/float64(d.count) - d.avg/float64(d.count)
+}
+
+func (d distribution) String() string {
+ return fmt.Sprintf("%d/%.3f/%d", d.min, d.avg, d.max)
+}
+
+type metaStatistics struct {
+ minPop, avgPop, maxPop, total distribution
+}
+
+func newMetaStatistics() *metaStatistics {
+ return &metaStatistics{
+ minPop: newDistribution(),
+ avgPop: newDistribution(),
+ maxPop: newDistribution(),
+ total: newDistribution(),
+ }
+}
+
+func (ms metaStatistics) Count() int {
+ return ms.minPop.count
+}
+
+func (ms metaStatistics) String() string {
+ if ms.maxPop == ms.total {
+ return fmt.Sprintf("histogram series (%d in total):\n- min populated: %v\n- avg populated: %v\n- max populated: %v", ms.Count(), ms.minPop, ms.avgPop, ms.maxPop)
+ }
+ return fmt.Sprintf("histogram series (%d in total):\n- min populated: %v\n- avg populated: %v\n- max populated: %v\n- total: %v", ms.Count(), ms.minPop, ms.avgPop, ms.maxPop, ms.total)
+}
+
+func (ms *metaStatistics) update(s *statistics) {
+ ms.minPop.update(s.minPop)
+ ms.avgPop.update(int(s.avgPop))
+ ms.maxPop.update(s.maxPop)
+ ms.total.update(s.total)
+}
diff --git a/cmd/promtool/analyze_test.go b/cmd/promtool/analyze_test.go
new file mode 100644
index 000000000..83d2ac4a3
--- /dev/null
+++ b/cmd/promtool/analyze_test.go
@@ -0,0 +1,170 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/common/model"
+)
+
+var (
+ exampleMatrix = model.Matrix{
+ &model.SampleStream{
+ Metric: model.Metric{
+ "le": "+Inf",
+ },
+ Values: []model.SamplePair{
+ {
+ Value: 31,
+ Timestamp: 100,
+ },
+ {
+ Value: 32,
+ Timestamp: 200,
+ },
+ {
+ Value: 40,
+ Timestamp: 300,
+ },
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{
+ "le": "0.5",
+ },
+ Values: []model.SamplePair{
+ {
+ Value: 10,
+ Timestamp: 100,
+ },
+ {
+ Value: 11,
+ Timestamp: 200,
+ },
+ {
+ Value: 11,
+ Timestamp: 300,
+ },
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{
+ "le": "10",
+ },
+ Values: []model.SamplePair{
+ {
+ Value: 30,
+ Timestamp: 100,
+ },
+ {
+ Value: 31,
+ Timestamp: 200,
+ },
+ {
+ Value: 37,
+ Timestamp: 300,
+ },
+ },
+ },
+ &model.SampleStream{
+ Metric: model.Metric{
+ "le": "2",
+ },
+ Values: []model.SamplePair{
+ {
+ Value: 25,
+ Timestamp: 100,
+ },
+ {
+ Value: 26,
+ Timestamp: 200,
+ },
+ {
+ Value: 27,
+ Timestamp: 300,
+ },
+ },
+ },
+ }
+ exampleMatrixLength = len(exampleMatrix)
+)
+
+func init() {
+ sortMatrix(exampleMatrix)
+}
+
+func TestGetBucketCountsAtTime(t *testing.T) {
+ cases := []struct {
+ matrix model.Matrix
+ length int
+ timeIdx int
+ expected []int
+ }{
+ {
+ exampleMatrix,
+ exampleMatrixLength,
+ 0,
+ []int{10, 15, 5, 1},
+ },
+ {
+ exampleMatrix,
+ exampleMatrixLength,
+ 1,
+ []int{11, 15, 5, 1},
+ },
+ {
+ exampleMatrix,
+ exampleMatrixLength,
+ 2,
+ []int{11, 16, 10, 3},
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
+ res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
+ require.NoError(t, err)
+ require.Equal(t, c.expected, res)
+ })
+ }
+}
+
+func TestCalcClassicBucketStatistics(t *testing.T) {
+ cases := []struct {
+ matrix model.Matrix
+ expected *statistics
+ }{
+ {
+ exampleMatrix,
+ &statistics{
+ minPop: 4,
+ avgPop: 4,
+ maxPop: 4,
+ total: 4,
+ },
+ },
+ }
+
+ for i, c := range cases {
+ t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+ res, err := calcClassicBucketStatistics(c.matrix)
+ require.NoError(t, err)
+ require.Equal(t, c.expected, res)
+ })
+ }
+}
diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go
index 6edb741ac..7b565c57c 100644
--- a/cmd/promtool/archive.go
+++ b/cmd/promtool/archive.go
@@ -15,9 +15,10 @@ package main
import (
"archive/tar"
- "compress/gzip"
"fmt"
"os"
+
+ "github.com/klauspost/compress/gzip"
)
const filePerm = 0o666
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index 39410881b..79db428c7 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -88,7 +88,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
blockDuration := getCompatibleBlockDuration(maxBlockDuration)
mint = blockDuration * (mint / blockDuration)
- db, err := tsdb.OpenDBReadOnly(outputDir, nil)
+ db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
if err != nil {
return err
}
@@ -127,7 +127,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
ctx := context.Background()
app := w.Appender(ctx)
- p := textparse.NewOpenMetricsParser(input)
+ symbolTable := labels.NewSymbolTable() // One table per block means it won't grow too large.
+ p := textparse.NewOpenMetricsParser(input, symbolTable)
samplesCount := 0
for {
e, err := p.Next()
@@ -216,7 +217,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
}
func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
- p := textparse.NewOpenMetricsParser(input)
+ p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps.
maxt, mint, err := getMinAndMaxTimestamps(p)
if err != nil {
return fmt.Errorf("getting min and max timestamp: %w", err)
diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go
index 7d29690e4..32abfa46a 100644
--- a/cmd/promtool/backfill_test.go
+++ b/cmd/promtool/backfill_test.go
@@ -26,6 +26,7 @@ import (
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc"
+ "github.com/prometheus/prometheus/util/testutil"
)
type backfillSample struct {
@@ -76,7 +77,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
allSamples := queryAllSeries(t, q, expectedMinTime, expectedMaxTime)
sortSamples(allSamples)
sortSamples(expectedSamples)
- require.Equal(t, expectedSamples, allSamples, "did not create correct samples")
+ testutil.RequireEqual(t, expectedSamples, allSamples, "did not create correct samples")
if len(allSamples) > 0 {
require.Equal(t, expectedMinTime, allSamples[0].Timestamp, "timestamp of first sample is not the expected minimum time")
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 508b681b8..e1d275e97 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -35,9 +35,7 @@ import (
"github.com/go-kit/log"
"github.com/google/pprof/profile"
"github.com/prometheus/client_golang/api"
- v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/client_golang/prometheus/testutil/promlint"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -58,8 +56,8 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
- "github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/util/documentcli"
)
@@ -185,6 +183,14 @@ func main() {
queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()
+ queryAnalyzeCfg := &QueryAnalyzeConfig{}
+ queryAnalyzeCmd := queryCmd.Command("analyze", "Run queries against your Prometheus to analyze the usage pattern of certain metrics.")
+ queryAnalyzeCmd.Flag("server", "Prometheus server to query.").Required().URLVar(&serverURL)
+ queryAnalyzeCmd.Flag("type", "Type of metric: histogram.").Required().StringVar(&queryAnalyzeCfg.metricType)
+ queryAnalyzeCmd.Flag("duration", "Time frame to analyze.").Default("1h").DurationVar(&queryAnalyzeCfg.duration)
+ queryAnalyzeCmd.Flag("time", "Query time (RFC3339 or Unix timestamp), defaults to now.").StringVar(&queryAnalyzeCfg.time)
+ queryAnalyzeCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().StringsVar(&queryAnalyzeCfg.matchers)
+
pushCmd := app.Command("push", "Push to a Prometheus server.")
pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
@@ -204,6 +210,7 @@ func main() {
"test-rule-file",
"The unit test file.",
).Required().ExistingFiles()
+ testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()
defaultDBPath := "data/"
tsdbCmd := app.Command("tsdb", "Run tsdb commands.")
@@ -228,9 +235,17 @@ func main() {
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+ dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
- dumpMatch := tsdbDumpCmd.Flag("match", "Series selector.").Default("{__name__=~'(?s:.*)'}").String()
+ dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
+
+ tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
+ dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
+ dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
+ dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
+ dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
+ dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@@ -364,11 +379,12 @@ func main() {
case testRulesCmd.FullCommand():
os.Exit(RulesUnitTest(
- promql.LazyLoaderOpts{
+ promqltest.LazyLoaderOpts{
EnableAtModifier: true,
EnableNegativeOffset: true,
},
*testRulesRun,
+ *testRulesDiff,
*testRulesFiles...),
)
@@ -382,7 +398,9 @@ func main() {
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
case tsdbDumpCmd.FullCommand():
- os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch)))
+ os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
+ case tsdbDumpOpenMetricsCmd.FullCommand():
+ os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
@@ -390,6 +408,9 @@ func main() {
case importRulesCmd.FullCommand():
os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
+ case queryAnalyzeCmd.FullCommand():
+ os.Exit(checkErr(queryAnalyzeCfg.run(serverURL, httpRoundTripper)))
+
case documentationCmd.FullCommand():
os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout)))
@@ -463,7 +484,7 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
return err
}
- request, err := http.NewRequest("GET", config.Address, nil)
+ request, err := http.NewRequest(http.MethodGet, config.Address, nil)
if err != nil {
return err
}
@@ -997,246 +1018,6 @@ func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
return stats, total, nil
}
-// QueryInstant performs an instant query against a Prometheus server.
-func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
- if url.Scheme == "" {
- url.Scheme = "http"
- }
- config := api.Config{
- Address: url.String(),
- RoundTripper: roundTripper,
- }
-
- // Create new client.
- c, err := api.NewClient(config)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error creating API client:", err)
- return failureExitCode
- }
-
- eTime := time.Now()
- if evalTime != "" {
- eTime, err = parseTime(evalTime)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
- return failureExitCode
- }
- }
-
- // Run query against client.
- api := v1.NewAPI(c)
-
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
- cancel()
- if err != nil {
- return handleAPIError(err)
- }
-
- p.printValue(val)
-
- return successExitCode
-}
-
-// QueryRange performs a range query against a Prometheus server.
-func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
- if url.Scheme == "" {
- url.Scheme = "http"
- }
- config := api.Config{
- Address: url.String(),
- RoundTripper: roundTripper,
- }
-
- if len(headers) > 0 {
- config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
- for key, value := range headers {
- req.Header.Add(key, value)
- }
- return roundTripper.RoundTrip(req)
- })
- }
-
- // Create new client.
- c, err := api.NewClient(config)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error creating API client:", err)
- return failureExitCode
- }
-
- var stime, etime time.Time
-
- if end == "" {
- etime = time.Now()
- } else {
- etime, err = parseTime(end)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error parsing end time:", err)
- return failureExitCode
- }
- }
-
- if start == "" {
- stime = etime.Add(-5 * time.Minute)
- } else {
- stime, err = parseTime(start)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error parsing start time:", err)
- return failureExitCode
- }
- }
-
- if !stime.Before(etime) {
- fmt.Fprintln(os.Stderr, "start time is not before end time")
- return failureExitCode
- }
-
- if step == 0 {
- resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
- // Convert seconds to nanoseconds such that time.Duration parses correctly.
- step = time.Duration(resolution) * time.Second
- }
-
- // Run query against client.
- api := v1.NewAPI(c)
- r := v1.Range{Start: stime, End: etime, Step: step}
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
- cancel()
-
- if err != nil {
- return handleAPIError(err)
- }
-
- p.printValue(val)
- return successExitCode
-}
-
-// QuerySeries queries for a series against a Prometheus server.
-func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
- if url.Scheme == "" {
- url.Scheme = "http"
- }
- config := api.Config{
- Address: url.String(),
- RoundTripper: roundTripper,
- }
-
- // Create new client.
- c, err := api.NewClient(config)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error creating API client:", err)
- return failureExitCode
- }
-
- stime, etime, err := parseStartTimeAndEndTime(start, end)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- return failureExitCode
- }
-
- // Run query against client.
- api := v1.NewAPI(c)
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
- cancel()
-
- if err != nil {
- return handleAPIError(err)
- }
-
- p.printSeries(val)
- return successExitCode
-}
-
-// QueryLabels queries for label values against a Prometheus server.
-func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
- if url.Scheme == "" {
- url.Scheme = "http"
- }
- config := api.Config{
- Address: url.String(),
- RoundTripper: roundTripper,
- }
-
- // Create new client.
- c, err := api.NewClient(config)
- if err != nil {
- fmt.Fprintln(os.Stderr, "error creating API client:", err)
- return failureExitCode
- }
-
- stime, etime, err := parseStartTimeAndEndTime(start, end)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- return failureExitCode
- }
-
- // Run query against client.
- api := v1.NewAPI(c)
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- val, warn, err := api.LabelValues(ctx, name, matchers, stime, etime)
- cancel()
-
- for _, v := range warn {
- fmt.Fprintln(os.Stderr, "query warning:", v)
- }
- if err != nil {
- return handleAPIError(err)
- }
-
- p.printLabelValues(val)
- return successExitCode
-}
-
-func handleAPIError(err error) int {
- var apiErr *v1.Error
- if errors.As(err, &apiErr) && apiErr.Detail != "" {
- fmt.Fprintf(os.Stderr, "query error: %v (detail: %s)\n", apiErr, strings.TrimSpace(apiErr.Detail))
- } else {
- fmt.Fprintln(os.Stderr, "query error:", err)
- }
-
- return failureExitCode
-}
-
-func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
- var (
- minTime = time.Now().Add(-9999 * time.Hour)
- maxTime = time.Now().Add(9999 * time.Hour)
- err error
- )
-
- stime := minTime
- etime := maxTime
-
- if start != "" {
- stime, err = parseTime(start)
- if err != nil {
- return stime, etime, fmt.Errorf("error parsing start time: %w", err)
- }
- }
-
- if end != "" {
- etime, err = parseTime(end)
- if err != nil {
- return stime, etime, fmt.Errorf("error parsing end time: %w", err)
- }
- }
- return stime, etime, nil
-}
-
-func parseTime(s string) (time.Time, error) {
- if t, err := strconv.ParseFloat(s, 64); err == nil {
- s, ns := math.Modf(t)
- return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
- }
- if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
- return t, nil
- }
- return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
-}
-
type endpointsGroup struct {
urlToFilename map[string]string
postProcess func(b []byte) ([]byte, error)
@@ -1390,15 +1171,12 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
evalInterval: evalInterval,
maxBlockDuration: maxBlockDuration,
}
- client, err := api.NewClient(api.Config{
- Address: url.String(),
- RoundTripper: roundTripper,
- })
+ api, err := newAPI(url, roundTripper, nil)
if err != nil {
return fmt.Errorf("new api client error: %w", err)
}
- ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, v1.NewAPI(client))
+ ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api)
errs := ruleImporter.loadGroups(ctx, files)
for _, err := range errs {
if err != nil {
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index 7306a3e64..78500fe93 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -25,6 +25,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"syscall"
"testing"
@@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
} {
t.Run(c.file, func(t *testing.T) {
for _, lintFatal := range []bool{true, false} {
- t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
+ t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
args := []string{"-test.main", "check", "config", "testdata/" + c.file}
if lintFatal {
args = append(args, "--lint-fatal")
diff --git a/cmd/promtool/query.go b/cmd/promtool/query.go
new file mode 100644
index 000000000..0d7cb12cf
--- /dev/null
+++ b/cmd/promtool/query.go
@@ -0,0 +1,251 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/prometheus/client_golang/api"
+ v1 "github.com/prometheus/client_golang/api/prometheus/v1"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+
+ _ "github.com/prometheus/prometheus/plugins" // Register plugins.
+)
+
+func newAPI(url *url.URL, roundTripper http.RoundTripper, headers map[string]string) (v1.API, error) {
+ if url.Scheme == "" {
+ url.Scheme = "http"
+ }
+ config := api.Config{
+ Address: url.String(),
+ RoundTripper: roundTripper,
+ }
+
+ if len(headers) > 0 {
+ config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+ for key, value := range headers {
+ req.Header.Add(key, value)
+ }
+ return roundTripper.RoundTrip(req)
+ })
+ }
+
+ // Create new client.
+ client, err := api.NewClient(config)
+ if err != nil {
+ return nil, err
+ }
+
+ api := v1.NewAPI(client)
+ return api, nil
+}
+
+// QueryInstant performs an instant query against a Prometheus server.
+func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
+ api, err := newAPI(url, roundTripper, nil)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error creating API client:", err)
+ return failureExitCode
+ }
+
+ eTime := time.Now()
+ if evalTime != "" {
+ eTime, err = parseTime(evalTime)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
+ return failureExitCode
+ }
+ }
+
+ // Run query against client.
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
+ cancel()
+ if err != nil {
+ return handleAPIError(err)
+ }
+
+ p.printValue(val)
+
+ return successExitCode
+}
+
+// QueryRange performs a range query against a Prometheus server.
+func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
+ api, err := newAPI(url, roundTripper, headers)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error creating API client:", err)
+ return failureExitCode
+ }
+
+ var stime, etime time.Time
+
+ if end == "" {
+ etime = time.Now()
+ } else {
+ etime, err = parseTime(end)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error parsing end time:", err)
+ return failureExitCode
+ }
+ }
+
+ if start == "" {
+ stime = etime.Add(-5 * time.Minute)
+ } else {
+ stime, err = parseTime(start)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error parsing start time:", err)
+ return failureExitCode
+ }
+ }
+
+ if !stime.Before(etime) {
+ fmt.Fprintln(os.Stderr, "start time is not before end time")
+ return failureExitCode
+ }
+
+ if step == 0 {
+ resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
+ // Convert seconds to nanoseconds such that time.Duration parses correctly.
+ step = time.Duration(resolution) * time.Second
+ }
+
+ // Run query against client.
+ r := v1.Range{Start: stime, End: etime, Step: step}
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
+ cancel()
+
+ if err != nil {
+ return handleAPIError(err)
+ }
+
+ p.printValue(val)
+ return successExitCode
+}
+
+// QuerySeries queries for a series against a Prometheus server.
+func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
+ api, err := newAPI(url, roundTripper, nil)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error creating API client:", err)
+ return failureExitCode
+ }
+
+ stime, etime, err := parseStartTimeAndEndTime(start, end)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return failureExitCode
+ }
+
+ // Run query against client.
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
+ cancel()
+
+ if err != nil {
+ return handleAPIError(err)
+ }
+
+ p.printSeries(val)
+ return successExitCode
+}
+
+// QueryLabels queries for label values against a Prometheus server.
+func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
+ api, err := newAPI(url, roundTripper, nil)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "error creating API client:", err)
+ return failureExitCode
+ }
+
+ stime, etime, err := parseStartTimeAndEndTime(start, end)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return failureExitCode
+ }
+
+ // Run query against client.
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ val, warn, err := api.LabelValues(ctx, name, matchers, stime, etime)
+ cancel()
+
+ for _, v := range warn {
+ fmt.Fprintln(os.Stderr, "query warning:", v)
+ }
+ if err != nil {
+ return handleAPIError(err)
+ }
+
+ p.printLabelValues(val)
+ return successExitCode
+}
+
+func handleAPIError(err error) int {
+ var apiErr *v1.Error
+ if errors.As(err, &apiErr) && apiErr.Detail != "" {
+ fmt.Fprintf(os.Stderr, "query error: %v (detail: %s)\n", apiErr, strings.TrimSpace(apiErr.Detail))
+ } else {
+ fmt.Fprintln(os.Stderr, "query error:", err)
+ }
+
+ return failureExitCode
+}
+
+func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
+ var (
+ minTime = time.Now().Add(-9999 * time.Hour)
+ maxTime = time.Now().Add(9999 * time.Hour)
+ err error
+ )
+
+ stime := minTime
+ etime := maxTime
+
+ if start != "" {
+ stime, err = parseTime(start)
+ if err != nil {
+ return stime, etime, fmt.Errorf("error parsing start time: %w", err)
+ }
+ }
+
+ if end != "" {
+ etime, err = parseTime(end)
+ if err != nil {
+ return stime, etime, fmt.Errorf("error parsing end time: %w", err)
+ }
+ }
+ return stime, etime, nil
+}
+
+func parseTime(s string) (time.Time, error) {
+ if t, err := strconv.ParseFloat(s, 64); err == nil {
+ s, ns := math.Modf(t)
+ return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
+ }
+ if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
+ return t, nil
+ }
+ return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
+}
diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go
index d8d6bb83e..5a1864484 100644
--- a/cmd/promtool/rules.go
+++ b/cmd/promtool/rules.go
@@ -234,17 +234,3 @@ func (m *multipleAppender) flushAndCommit(ctx context.Context) error {
}
return nil
}
-
-func max(x, y int64) int64 {
- if x > y {
- return x
- }
- return y
-}
-
-func min(x, y int64) int64 {
- if x < y {
- return x
- }
- return y
-}
diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go
index 75aad6786..d55fb0c89 100644
--- a/cmd/promtool/rules_test.go
+++ b/cmd/promtool/rules_test.go
@@ -78,7 +78,6 @@ func TestBackfillRuleIntegration(t *testing.T) {
// Execute the test more than once to simulate running the rule importer twice with the same data.
// We expect duplicate blocks with the same series are created when run more than once.
for i := 0; i < tt.runcount; i++ {
-
ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration)
require.NoError(t, err)
path1 := filepath.Join(tmpDir, "test.file")
diff --git a/cmd/promtool/sd.go b/cmd/promtool/sd.go
index 155152e1a..e65262d43 100644
--- a/cmd/promtool/sd.go
+++ b/cmd/promtool/sd.go
@@ -18,10 +18,10 @@ import (
"encoding/json"
"fmt"
"os"
- "reflect"
"time"
"github.com/go-kit/log"
+ "github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
@@ -78,12 +78,25 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault
defer cancel()
for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
- d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Registerer: registerer})
+ reg := prometheus.NewRegistry()
+ refreshMetrics := discovery.NewRefreshMetrics(reg)
+ metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+ err := metrics.Register()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "Could not register service discovery metrics", err)
+ return failureExitCode
+ }
+
+ d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Metrics: metrics})
if err != nil {
fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
return failureExitCode
}
- go d.Run(ctx, targetGroupChan)
+ go func() {
+ d.Run(ctx, targetGroupChan)
+ metrics.Unregister()
+ refreshMetrics.Unregister()
+ }()
}
var targetGroups []*targetgroup.Group
@@ -140,7 +153,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
duplicateRes := false
for _, sdCheckRes := range sdCheckResults {
- if reflect.DeepEqual(sdCheckRes, result) {
+ if cmp.Equal(sdCheckRes, result, cmp.Comparer(labels.Equal)) {
duplicateRes = true
break
}
diff --git a/cmd/promtool/sd_test.go b/cmd/promtool/sd_test.go
index 2f4d3aba7..cb65ee72a 100644
--- a/cmd/promtool/sd_test.go
+++ b/cmd/promtool/sd_test.go
@@ -23,6 +23,7 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
+ "github.com/prometheus/prometheus/util/testutil"
"github.com/stretchr/testify/require"
)
@@ -69,5 +70,5 @@ func TestSDCheckResult(t *testing.T) {
},
}
- require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
+ testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
}
diff --git a/cmd/promtool/testdata/dump-openmetrics-roundtrip-test.prom b/cmd/promtool/testdata/dump-openmetrics-roundtrip-test.prom
new file mode 100644
index 000000000..c2318e94e
--- /dev/null
+++ b/cmd/promtool/testdata/dump-openmetrics-roundtrip-test.prom
@@ -0,0 +1,15 @@
+my_histogram_bucket{instance="localhost:8000",job="example2",le="+Inf"} 1.0267820369e+10 1700215884.373
+my_histogram_bucket{instance="localhost:8000",job="example2",le="+Inf"} 1.026872507e+10 1700215889.373
+my_histogram_bucket{instance="localhost:8000",job="example2",le="0.01"} 0 1700215884.373
+my_histogram_bucket{instance="localhost:8000",job="example2",le="0.01"} 0 1700215889.373
+my_histogram_count{instance="localhost:8000",job="example2"} 1.0267820369e+10 1700215884.373
+my_histogram_count{instance="localhost:8000",job="example2"} 1.026872507e+10 1700215889.373
+my_summary_count{instance="localhost:8000",job="example5"} 9.518161497e+09 1700211684.981
+my_summary_count{instance="localhost:8000",job="example5"} 9.519048034e+09 1700211689.984
+my_summary_sum{instance="localhost:8000",job="example5"} 5.2349889185e+10 1700211684.981
+my_summary_sum{instance="localhost:8000",job="example5"} 5.2354761848e+10 1700211689.984
+up{instance="localhost:8000",job="example2"} 1 1700226034.330
+up{instance="localhost:8000",job="example2"} 1 1700226094.329
+up{instance="localhost:8000",job="example3"} 1 1700210681.366
+up{instance="localhost:8000",job="example3"} 1 1700210686.366
+# EOF
diff --git a/cmd/promtool/testdata/dump-openmetrics-test.prom b/cmd/promtool/testdata/dump-openmetrics-test.prom
new file mode 100644
index 000000000..c027b8c27
--- /dev/null
+++ b/cmd/promtool/testdata/dump-openmetrics-test.prom
@@ -0,0 +1,11 @@
+my_counter{baz="abc",foo="bar"} 1 0.000
+my_counter{baz="abc",foo="bar"} 2 60.000
+my_counter{baz="abc",foo="bar"} 3 120.000
+my_counter{baz="abc",foo="bar"} 4 180.000
+my_counter{baz="abc",foo="bar"} 5 240.000
+my_gauge{abc="baz",bar="foo"} 9 0.000
+my_gauge{abc="baz",bar="foo"} 8 60.000
+my_gauge{abc="baz",bar="foo"} 0 120.000
+my_gauge{abc="baz",bar="foo"} 4 180.000
+my_gauge{abc="baz",bar="foo"} 7 240.000
+# EOF
diff --git a/cmd/promtool/testdata/dump-test-1.prom b/cmd/promtool/testdata/dump-test-1.prom
new file mode 100644
index 000000000..878cdecab
--- /dev/null
+++ b/cmd/promtool/testdata/dump-test-1.prom
@@ -0,0 +1,15 @@
+{__name__="heavy_metric", foo="bar"} 5 0
+{__name__="heavy_metric", foo="bar"} 4 60000
+{__name__="heavy_metric", foo="bar"} 3 120000
+{__name__="heavy_metric", foo="bar"} 2 180000
+{__name__="heavy_metric", foo="bar"} 1 240000
+{__name__="heavy_metric", foo="foo"} 5 0
+{__name__="heavy_metric", foo="foo"} 4 60000
+{__name__="heavy_metric", foo="foo"} 3 120000
+{__name__="heavy_metric", foo="foo"} 2 180000
+{__name__="heavy_metric", foo="foo"} 1 240000
+{__name__="metric", baz="abc", foo="bar"} 1 0
+{__name__="metric", baz="abc", foo="bar"} 2 60000
+{__name__="metric", baz="abc", foo="bar"} 3 120000
+{__name__="metric", baz="abc", foo="bar"} 4 180000
+{__name__="metric", baz="abc", foo="bar"} 5 240000
diff --git a/cmd/promtool/testdata/dump-test-2.prom b/cmd/promtool/testdata/dump-test-2.prom
new file mode 100644
index 000000000..4ac2ffa5a
--- /dev/null
+++ b/cmd/promtool/testdata/dump-test-2.prom
@@ -0,0 +1,10 @@
+{__name__="heavy_metric", foo="foo"} 5 0
+{__name__="heavy_metric", foo="foo"} 4 60000
+{__name__="heavy_metric", foo="foo"} 3 120000
+{__name__="heavy_metric", foo="foo"} 2 180000
+{__name__="heavy_metric", foo="foo"} 1 240000
+{__name__="metric", baz="abc", foo="bar"} 1 0
+{__name__="metric", baz="abc", foo="bar"} 2 60000
+{__name__="metric", baz="abc", foo="bar"} 3 120000
+{__name__="metric", baz="abc", foo="bar"} 4 180000
+{__name__="metric", baz="abc", foo="bar"} 5 240000
diff --git a/cmd/promtool/testdata/dump-test-3.prom b/cmd/promtool/testdata/dump-test-3.prom
new file mode 100644
index 000000000..faa278101
--- /dev/null
+++ b/cmd/promtool/testdata/dump-test-3.prom
@@ -0,0 +1,2 @@
+{__name__="metric", baz="abc", foo="bar"} 2 60000
+{__name__="metric", baz="abc", foo="bar"} 3 120000
diff --git a/cmd/promtool/testdata/no-test-group-interval.yml b/cmd/promtool/testdata/no-test-group-interval.yml
index d1f6935cd..99f2ec646 100644
--- a/cmd/promtool/testdata/no-test-group-interval.yml
+++ b/cmd/promtool/testdata/no-test-group-interval.yml
@@ -12,4 +12,4 @@ tests:
eval_time: 1m
exp_samples:
- value: 1
- labels: test
\ No newline at end of file
+ labels: test
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index e6df9b78c..2ed7244b1 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -15,6 +15,7 @@ package main
import (
"bufio"
+ "bytes"
"context"
"errors"
"fmt"
@@ -23,6 +24,7 @@ import (
"path/filepath"
"runtime"
"runtime/pprof"
+ "slices"
"strconv"
"strings"
"sync"
@@ -31,7 +33,7 @@ import (
"github.com/alecthomas/units"
"github.com/go-kit/log"
- "golang.org/x/exp/slices"
+ "go.uber.org/atomic"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
@@ -148,8 +150,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
}
func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
- var mu sync.Mutex
- var total uint64
+ var total atomic.Uint64
for i := 0; i < scrapeCount; i += 100 {
var wg sync.WaitGroup
@@ -164,22 +165,21 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
wg.Add(1)
go func() {
+ defer wg.Done()
+
n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
if err != nil {
// exitWithError(err)
fmt.Println(" err", err)
}
- mu.Lock()
- total += n
- mu.Unlock()
- wg.Done()
+ total.Add(n)
}()
}
wg.Wait()
}
fmt.Println("ingestion completed")
- return total, nil
+ return total.Load(), nil
}
func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
@@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
}
func listBlocks(path string, humanReadable bool) error {
- db, err := tsdb.OpenDBReadOnly(path, nil)
+ db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil {
return err
}
@@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
}
func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
- db, err := tsdb.OpenDBReadOnly(path, nil)
+ db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil {
return nil, nil, err
}
@@ -667,7 +667,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
it := fhchk.Iterator(nil)
bucketCount := 0
for it.Next() == chunkenc.ValFloatHistogram {
- _, f := it.AtFloatHistogram()
+ _, f := it.AtFloatHistogram(nil)
bucketCount += len(f.PositiveBuckets)
bucketCount += len(f.NegativeBuckets)
}
@@ -682,7 +682,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
it := hchk.Iterator(nil)
bucketCount := 0
for it.Next() == chunkenc.ValHistogram {
- _, f := it.AtHistogram()
+ _, f := it.AtHistogram(nil)
bucketCount += len(f.PositiveBuckets)
bucketCount += len(f.NegativeBuckets)
}
@@ -706,8 +706,10 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
return nil
}
-func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
- db, err := tsdb.OpenDBReadOnly(path, nil)
+type SeriesSetFormatter func(series storage.SeriesSet) error
+
+func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
+ db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
if err != nil {
return err
}
@@ -720,12 +722,38 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match strin
}
defer q.Close()
- matchers, err := parser.ParseMetricSelector(match)
+ matcherSets, err := parser.ParseMetricSelectors(match)
+ if err != nil {
+ return err
+ }
+
+ var ss storage.SeriesSet
+ if len(matcherSets) > 1 {
+ var sets []storage.SeriesSet
+ for _, mset := range matcherSets {
+ sets = append(sets, q.Select(ctx, true, nil, mset...))
+ }
+ ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
+ } else {
+ ss = q.Select(ctx, false, nil, matcherSets[0]...)
+ }
+
+ err = formatter(ss)
if err != nil {
return err
}
- ss := q.Select(ctx, false, nil, matchers...)
+ if ws := ss.Warnings(); len(ws) > 0 {
+ return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
+ }
+
+ if ss.Err() != nil {
+ return ss.Err()
+ }
+ return nil
+}
+
+func formatSeriesSet(ss storage.SeriesSet) error {
for ss.Next() {
series := ss.At()
lbs := series.Labels()
@@ -735,25 +763,55 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match strin
fmt.Printf("%s %g %d\n", lbs, val, ts)
}
for it.Next() == chunkenc.ValFloatHistogram {
- ts, fh := it.AtFloatHistogram()
+ ts, fh := it.AtFloatHistogram(nil)
fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
}
for it.Next() == chunkenc.ValHistogram {
- ts, h := it.AtHistogram()
+ ts, h := it.AtHistogram(nil)
fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
}
if it.Err() != nil {
return ss.Err()
}
}
+ return nil
+}
- if ws := ss.Warnings(); len(ws) > 0 {
- return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
- }
+// CondensedString is labels.Labels.String() without spaces after the commas.
+func CondensedString(ls labels.Labels) string {
+ var b bytes.Buffer
- if ss.Err() != nil {
- return ss.Err()
+ b.WriteByte('{')
+ i := 0
+ ls.Range(func(l labels.Label) {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ b.WriteString(l.Name)
+ b.WriteByte('=')
+ b.WriteString(strconv.Quote(l.Value))
+ i++
+ })
+ b.WriteByte('}')
+ return b.String()
+}
+
+func formatSeriesSetOpenMetrics(ss storage.SeriesSet) error {
+ for ss.Next() {
+ series := ss.At()
+ lbs := series.Labels()
+ metricName := lbs.Get(labels.MetricName)
+ lbs = lbs.DropMetricName()
+ it := series.Iterator(nil)
+ for it.Next() == chunkenc.ValFloat {
+ ts, val := it.At()
+ fmt.Printf("%s%s %g %.3f\n", metricName, CondensedString(lbs), val, float64(ts)/1000)
+ }
+ if it.Err() != nil {
+ return ss.Err()
+ }
}
+ fmt.Println("# EOF")
return nil
}
@@ -780,6 +838,10 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
}
func displayHistogram(dataType string, datas []int, total int) {
+ if len(datas) == 0 {
+ fmt.Printf("%s: N/A\n\n", dataType)
+ return
+ }
slices.Sort(datas)
start, end, step := generateBucket(datas[0], datas[len(datas)-1])
sum := 0
@@ -794,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
}
avg := sum / len(datas)
fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
- maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
- maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
- maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+ maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
+ maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
+ maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
for bucket, count := range buckets {
percentage := 100.0 * count / total
fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))
diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go
index 0f0040cd3..75089b168 100644
--- a/cmd/promtool/tsdb_test.go
+++ b/cmd/promtool/tsdb_test.go
@@ -14,9 +14,20 @@
package main
import (
+ "bytes"
+ "context"
+ "io"
+ "math"
+ "os"
+ "runtime"
+ "strings"
"testing"
+ "time"
"github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/promql/promqltest"
+ "github.com/prometheus/prometheus/tsdb"
)
func TestGenerateBucket(t *testing.T) {
@@ -41,3 +52,144 @@ func TestGenerateBucket(t *testing.T) {
require.Equal(t, tc.step, step)
}
}
+
+// getDumpedSamples dumps samples and returns them.
+func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string {
+ t.Helper()
+
+ oldStdout := os.Stdout
+ r, w, _ := os.Pipe()
+ os.Stdout = w
+
+ err := dumpSamples(
+ context.Background(),
+ path,
+ t.TempDir(),
+ mint,
+ maxt,
+ match,
+ formatter,
+ )
+ require.NoError(t, err)
+
+ w.Close()
+ os.Stdout = oldStdout
+
+ var buf bytes.Buffer
+ io.Copy(&buf, r)
+ return buf.String()
+}
+
+func normalizeNewLine(b []byte) []byte {
+ if strings.Contains(runtime.GOOS, "windows") {
+ // We use "\n" while dumping on Windows as well.
+ return bytes.ReplaceAll(b, []byte("\r\n"), []byte("\n"))
+ }
+ return b
+}
+
+func TestTSDBDump(t *testing.T) {
+ storage := promqltest.LoadedStorage(t, `
+ load 1m
+ metric{foo="bar", baz="abc"} 1 2 3 4 5
+ heavy_metric{foo="bar"} 5 4 3 2 1
+ heavy_metric{foo="foo"} 5 4 3 2 1
+ `)
+
+ tests := []struct {
+ name string
+ mint int64
+ maxt int64
+ match []string
+ expectedDump string
+ }{
+ {
+ name: "default match",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__=~'(?s:.*)'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ },
+ {
+ name: "same matcher twice",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{foo=~'.+'}", "{foo=~'.+'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ },
+ {
+ name: "no duplication",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ },
+ {
+ name: "well merged",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
+ expectedDump: "testdata/dump-test-1.prom",
+ },
+ {
+ name: "multi matchers",
+ mint: math.MinInt64,
+ maxt: math.MaxInt64,
+ match: []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
+ expectedDump: "testdata/dump-test-2.prom",
+ },
+ {
+ name: "with reduced mint and maxt",
+ mint: int64(60000),
+ maxt: int64(120000),
+ match: []string{"{__name__='metric'}"},
+ expectedDump: "testdata/dump-test-3.prom",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet)
+ expectedMetrics, err := os.ReadFile(tt.expectedDump)
+ require.NoError(t, err)
+ expectedMetrics = normalizeNewLine(expectedMetrics)
+ // Even though samples are not sorted in the single-matcher case, the order in the cases above should stay the same.
+ require.Equal(t, string(expectedMetrics), dumpedMetrics)
+ })
+ }
+}
+
+func TestTSDBDumpOpenMetrics(t *testing.T) {
+ storage := promqltest.LoadedStorage(t, `
+ load 1m
+ my_counter{foo="bar", baz="abc"} 1 2 3 4 5
+ my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
+ `)
+
+ expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom")
+ require.NoError(t, err)
+ expectedMetrics = normalizeNewLine(expectedMetrics)
+ dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
+ require.Equal(t, string(expectedMetrics), dumpedMetrics)
+}
+
+func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
+ initialMetrics, err := os.ReadFile("testdata/dump-openmetrics-roundtrip-test.prom")
+ require.NoError(t, err)
+ initialMetrics = normalizeNewLine(initialMetrics)
+
+ dbDir := t.TempDir()
+ // Import samples from OM format
+ err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour)
+ require.NoError(t, err)
+ db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, db.Close())
+ })
+
+ // Dump the blocks into OM format
+ dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
+
+ // Should get back the initial metrics.
+ require.Equal(t, string(initialMetrics), dumpedMetrics)
+}
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index a25a8596d..5451c5296 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -15,18 +15,20 @@ package main
import (
"context"
+ "encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
- "reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/go-kit/log"
+ "github.com/google/go-cmp/cmp"
"github.com/grafana/regexp"
+ "github.com/nsf/jsondiff"
"github.com/prometheus/common/model"
"gopkg.in/yaml.v2"
@@ -34,13 +36,14 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
+ "github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage"
)
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files ...string) int {
+func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
failed := false
var run *regexp.Regexp
@@ -49,7 +52,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files .
}
for _, f := range files {
- if errs := ruleUnitTest(f, queryOpts, run); errs != nil {
+ if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error())
@@ -67,7 +70,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files .
return successExitCode
}
-func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
fmt.Println("Unit Testing: ", filename)
b, err := os.ReadFile(filename)
@@ -109,7 +112,7 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.
if t.Interval == 0 {
t.Interval = unitTestInp.EvaluationInterval
}
- ers := t.test(evalInterval, groupOrderMap, queryOpts, unitTestInp.RuleFiles...)
+ ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
if ers != nil {
errs = append(errs, ers...)
}
@@ -173,13 +176,18 @@ type testGroup struct {
}
// test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, ruleFiles ...string) []error {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
// Setup testing suite.
- suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
+ suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil {
return []error{err}
}
- defer suite.Close()
+ defer func() {
+ err := suite.Close()
+ if err != nil {
+ outErr = append(outErr, err)
+ }
+ }()
suite.SubqueryInterval = evalInterval
// Load the rule files.
@@ -338,15 +346,51 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
sort.Sort(gotAlerts)
sort.Sort(expAlerts)
- if !reflect.DeepEqual(expAlerts, gotAlerts) {
+ if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal)) {
var testName string
if tg.TestGroupName != "" {
testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName)
}
expString := indentLines(expAlerts.String(), " ")
gotString := indentLines(gotAlerts.String(), " ")
- errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
- testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
+ if diffFlag {
+ // If gotAlerts is empty, populate it with an empty entry so the JSON diff has something to compare.
+ if gotAlerts.Len() == 0 {
+ gotAlerts = append(gotAlerts, labelAndAnnotation{
+ Labels: labels.Labels{},
+ Annotations: labels.Labels{},
+ })
+ }
+ // If expAlerts is empty, populate it with an empty entry so the JSON diff has something to compare.
+ if expAlerts.Len() == 0 {
+ expAlerts = append(expAlerts, labelAndAnnotation{
+ Labels: labels.Labels{},
+ Annotations: labels.Labels{},
+ })
+ }
+
+ diffOpts := jsondiff.DefaultConsoleOptions()
+ expAlertsJSON, err := json.Marshal(expAlerts)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("error marshaling expected %s alert: [%s]", tg.TestGroupName, err.Error()))
+ continue
+ }
+
+ gotAlertsJSON, err := json.Marshal(gotAlerts)
+ if err != nil {
+ errs = append(errs, fmt.Errorf("error marshaling received %s alert: [%s]", tg.TestGroupName, err.Error()))
+ continue
+ }
+
+ res, diff := jsondiff.Compare(expAlertsJSON, gotAlertsJSON, &diffOpts)
+ if res != jsondiff.FullMatch {
+ errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n diff: %v",
+ testName, testcase.Alertname, testcase.EvalTime.String(), indentLines(diff, " ")))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
+ testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
+ }
}
}
@@ -370,7 +414,7 @@ Outer:
gotSamples = append(gotSamples, parsedSample{
Labels: s.Metric.Copy(),
Value: s.F,
- Histogram: promql.HistogramTestExpression(s.H),
+ Histogram: promqltest.HistogramTestExpression(s.H),
})
}
@@ -400,7 +444,7 @@ Outer:
expSamples = append(expSamples, parsedSample{
Labels: lb,
Value: s.Value,
- Histogram: promql.HistogramTestExpression(hist),
+ Histogram: promqltest.HistogramTestExpression(hist),
})
}
@@ -410,7 +454,7 @@ Outer:
sort.Slice(gotSamples, func(i, j int) bool {
return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
})
- if !reflect.DeepEqual(expSamples, gotSamples) {
+ if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal)) {
errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr,
testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
}
@@ -529,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
}
s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
for i, l := range la[1:] {
- s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
+ s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
}
s += "\n]"
diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go
index fb4012e3c..2dbd5a4e5 100644
--- a/cmd/promtool/unittest_test.go
+++ b/cmd/promtool/unittest_test.go
@@ -16,7 +16,9 @@ package main
import (
"testing"
- "github.com/prometheus/prometheus/promql"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/promql/promqltest"
)
func TestRulesUnitTest(t *testing.T) {
@@ -26,7 +28,7 @@ func TestRulesUnitTest(t *testing.T) {
tests := []struct {
name string
args args
- queryOpts promql.LazyLoaderOpts
+ queryOpts promqltest.LazyLoaderOpts
want int
}{
{
@@ -90,7 +92,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/at-modifier-test.yml"},
},
- queryOpts: promql.LazyLoaderOpts{
+ queryOpts: promqltest.LazyLoaderOpts{
EnableAtModifier: true,
},
want: 0,
@@ -107,7 +109,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/negative-offset-test.yml"},
},
- queryOpts: promql.LazyLoaderOpts{
+ queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
},
want: 0,
@@ -117,7 +119,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/no-test-group-interval.yml"},
},
- queryOpts: promql.LazyLoaderOpts{
+ queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
},
want: 0,
@@ -125,7 +127,7 @@ func TestRulesUnitTest(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := RulesUnitTest(tt.queryOpts, nil, tt.args.files...); got != tt.want {
+ if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
}
})
@@ -140,7 +142,7 @@ func TestRulesUnitTestRun(t *testing.T) {
tests := []struct {
name string
args args
- queryOpts promql.LazyLoaderOpts
+ queryOpts promqltest.LazyLoaderOpts
want int
}{
{
@@ -178,9 +180,8 @@ func TestRulesUnitTestRun(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if got := RulesUnitTest(tt.queryOpts, tt.args.run, tt.args.files...); got != tt.want {
- t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
- }
+ got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...)
+ require.Equal(t, tt.want, got)
})
}
}
diff --git a/config/config.go b/config/config.go
index ddcca84dc..1cfd58864 100644
--- a/config/config.go
+++ b/config/config.go
@@ -82,7 +82,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
return cfg, nil
}
- b := labels.ScratchBuilder{}
+ b := labels.NewScratchBuilder(0)
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
newV := os.Expand(v.Value, func(s string) string {
if s == "$" {
@@ -97,6 +97,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
if newV != v.Value {
level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
}
+ // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
b.Add(v.Name, newV)
})
cfg.GlobalConfig.ExternalLabels = b.Labels()
@@ -610,9 +611,12 @@ type ScrapeConfig struct {
// More than this label value length post metric-relabeling will cause the
// scrape to fail. 0 means no limit.
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
- // More than this many buckets in a native histogram will cause the scrape to
- // fail.
+ // If there are more than this many buckets in a native histogram,
+ // buckets will be merged to stay within the limit.
NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
+ // If the growth factor of one bucket to the next is smaller than this,
+ // buckets will be merged to increase the factor sufficiently.
+ NativeHistogramMinBucketFactor float64 `yaml:"native_histogram_min_bucket_factor,omitempty"`
// Keep no more than this many dropped targets per job.
// 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
@@ -937,6 +941,8 @@ type AlertmanagerConfig struct {
// List of Alertmanager relabel configurations.
RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
+ // Relabel alerts before sending to the specific alertmanager.
+ AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -979,6 +985,12 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
}
}
+ for _, rlcfg := range c.AlertRelabelConfigs {
+ if rlcfg == nil {
+ return errors.New("empty or null Alertmanager alert relabeling rule")
+ }
+ }
+
return nil
}
@@ -1124,6 +1136,9 @@ type QueueConfig struct {
MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
RetryOnRateLimit bool `yaml:"retry_on_http_429,omitempty"`
+
+ // Samples older than the limit will be dropped.
+ SampleAgeLimit model.Duration `yaml:"sample_age_limit,omitempty"`
}
// MetadataConfig is the configuration for sending metadata to remote
diff --git a/config/config_default_test.go b/config/config_default_test.go
index f5333f4c8..26623590d 100644
--- a/config/config_default_test.go
+++ b/config/config_default_test.go
@@ -12,7 +12,6 @@
// limitations under the License.
//go:build !windows
-// +build !windows
package config
diff --git a/config/config_test.go b/config/config_test.go
index 5d753a0f7..14981d25f 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -58,6 +58,7 @@ import (
"github.com/prometheus/prometheus/discovery/zookeeper"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
+ "github.com/prometheus/prometheus/util/testutil"
)
func mustParseURL(u string) *config.URL {
@@ -568,6 +569,7 @@ var expectedConf = &Config{
ServiceDiscoveryConfigs: discovery.Configs{
&xds.KumaSDConfig{
Server: "http://kuma-control-plane.kuma-system.svc:5676",
+ ClientID: "main-prometheus",
HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(15 * time.Second),
FetchTimeout: model.Duration(2 * time.Minute),
@@ -1838,7 +1840,7 @@ var expectedErrors = []struct {
},
{
filename: "azure_authentication_method.bad.yml",
- errMsg: "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
+ errMsg: "unknown authentication_type \"invalid\". Supported types are \"OAuth\", \"ManagedIdentity\" or \"SDK\"",
},
{
filename: "azure_bearertoken_basicauth.bad.yml",
@@ -2036,16 +2038,16 @@ func TestExpandExternalLabels(t *testing.T) {
c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
require.NoError(t, err)
- require.Equal(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
+ testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
require.NoError(t, err)
- require.Equal(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
+ testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
os.Setenv("TEST", "TestValue")
c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
require.NoError(t, err)
- require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
+ testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
}
func TestAgentMode(t *testing.T) {
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index e034eff43..b58430164 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -221,6 +221,7 @@ scrape_configs:
kuma_sd_configs:
- server: http://kuma-control-plane.kuma-system.svc:5676
+ client_id: main-prometheus
- job_name: service-marathon
marathon_sd_configs:
diff --git a/config/testdata/roundtrip.good.yml b/config/testdata/roundtrip.good.yml
index f2634d257..24ab7d259 100644
--- a/config/testdata/roundtrip.good.yml
+++ b/config/testdata/roundtrip.good.yml
@@ -108,6 +108,7 @@ scrape_configs:
kuma_sd_configs:
- server: http://kuma-control-plane.kuma-system.svc:5676
+ client_id: main-prometheus
marathon_sd_configs:
- servers:
diff --git a/consoles/node-cpu.html b/consoles/node-cpu.html
index d6c515d2d..284ad738f 100644
--- a/consoles/node-cpu.html
+++ b/consoles/node-cpu.html
@@ -47,7 +47,7 @@