diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index a6df329be..b75a7896e 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -2,6 +2,6 @@
/web/ui/module @juliusv @nexucis
/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
/discovery/kubernetes @brancz
-/tsdb @codesome
-/promql @codesome @roidelapluie
+/tsdb @jesusvazquez
+/promql @roidelapluie
/cmd/promtool @dgl
diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml
index 5d93c7dfe..a72837b79 100644
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- - uses: bufbuild/buf-setup-action@v1.13.1
+ - uses: bufbuild/buf-setup-action@v1.20.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@v1
diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml
index 567ecc000..edb7936d7 100644
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -10,7 +10,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@v3
- - uses: bufbuild/buf-setup-action@v1.13.1
+ - uses: bufbuild/buf-setup-action@v1.20.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@v1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d6d79b6fc..c0bccd0ef 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -3,6 +3,7 @@ name: CI
on:
pull_request:
push:
+
jobs:
test_go:
name: Go tests
@@ -10,10 +11,10 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.19-base
+ image: quay.io/prometheus/golang-builder:1.20-base
steps:
- uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
- run: go test ./tsdb/ -test.tsdb-isolation=false
@@ -31,11 +32,11 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
container:
- image: quay.io/prometheus/golang-builder:1.19-base
+ image: quay.io/prometheus/golang-builder:1.20-base
steps:
- uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- uses: ./.github/promci/actions/setup_environment
with:
enable_go: false
@@ -52,9 +53,9 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
- - uses: actions/setup-go@v3
+ - uses: actions/setup-go@v4
with:
- go-version: '>=1.19 <1.20'
+ go-version: '>=1.20 <1.21'
- run: |
$TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
go test $TestTargets -vet=off -v
@@ -65,7 +66,7 @@ jobs:
runs-on: ubuntu-latest
# The go verson in this image should be N-1 wrt test_go.
container:
- image: quay.io/prometheus/golang-builder:1.18-base
+ image: quay.io/prometheus/golang-builder:1.19-base
steps:
- uses: actions/checkout@v3
- run: make build
@@ -104,7 +105,7 @@ jobs:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- uses: ./.github/promci/actions/build
with:
promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -127,7 +128,7 @@ jobs:
# should also be updated.
steps:
- uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- uses: ./.github/promci/actions/build
with:
parallelism: 12
@@ -139,7 +140,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Go
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v4
with:
go-version: 1.20.x
- name: Install snmp_exporter/generator dependencies
@@ -148,7 +149,8 @@ jobs:
- name: Lint
uses: golangci/golangci-lint-action@v3.4.0
with:
- version: v1.51.2
+ args: --verbose
+ version: v1.53.3
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@@ -162,7 +164,7 @@ jobs:
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- uses: ./.github/promci/actions/publish_main
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -176,7 +178,7 @@ jobs:
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
steps:
- uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- uses: ./.github/promci/actions/publish_release
with:
docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -191,13 +193,13 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
- - uses: prometheus/promci@v0.0.2
+ - uses: prometheus/promci@v0.1.0
- name: Install nodejs
uses: actions/setup-node@v3
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
- - uses: actions/cache@v3.2.4
+ - uses: actions/cache@v3.3.1
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 01075f0c2..6036e80ae 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -21,9 +21,9 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v3
- - uses: actions/setup-go@v3
+ - uses: actions/setup-go@v4
with:
- go-version: '>=1.19 <1.20'
+ go-version: '>=1.20 <1.21'
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
diff --git a/.golangci.yml b/.golangci.yml
index 81790e6e3..4a6daae59 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,5 @@
run:
- deadline: 5m
+ timeout: 15m
skip-files:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
@@ -10,31 +10,60 @@ output:
linters:
enable:
- depguard
+ - gocritic
- gofumpt
- goimports
- - revive
- misspell
+ - predeclared
+ - revive
+ - unconvert
+ - unused
issues:
max-same-issues: 0
exclude-rules:
+ - linters:
+ - gocritic
+ text: "appendAssign"
- path: _test.go
linters:
- errcheck
linters-settings:
depguard:
- list-type: blacklist
- include-go-root: true
- packages-with-error-message:
- - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
- - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
- - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
- - io/ioutil: "Use corresponding 'os' or 'io' functions instead."
- - regexp: "Use github.com/grafana/regexp instead of regexp"
+ rules:
+ main:
+ deny:
+ - pkg: "sync/atomic"
+ desc: "Use go.uber.org/atomic instead of sync/atomic"
+ - pkg: "github.com/stretchr/testify/assert"
+ desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+ - pkg: "github.com/go-kit/kit/log"
+ desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+ - pkg: "io/ioutil"
+ desc: "Use corresponding 'os' or 'io' functions instead."
+ - pkg: "regexp"
+ desc: "Use github.com/grafana/regexp instead of regexp"
errcheck:
- exclude: scripts/errcheck_excludes.txt
+ exclude-functions:
+ # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
+ - io.Copy
+ # The next two are used in HTTP handlers, any error is handled by the server itself.
+ - io.WriteString
+ - (net/http.ResponseWriter).Write
+ # No need to check for errors on server's shutdown.
+ - (*net/http.Server).Shutdown
+ # Never check for logger errors.
+ - (github.com/go-kit/log.Logger).Log
+ # Never check for rollback errors as Rollback() is called when a previous error was detected.
+ - (github.com/prometheus/prometheus/storage.Appender).Rollback
goimports:
local-prefixes: github.com/prometheus/prometheus
gofumpt:
extra-rules: true
+ revive:
+ rules:
+ # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
+ - name: unused-parameter
+ severity: warning
+ disabled: true
diff --git a/.promu.yml b/.promu.yml
index 233295f85..f724dc34f 100644
--- a/.promu.yml
+++ b/.promu.yml
@@ -1,7 +1,7 @@
go:
# Whenever the Go version is updated here,
# .circle/config.yml should also be updated.
- version: 1.19
+ version: 1.20
repository:
path: github.com/prometheus/prometheus
build:
@@ -14,8 +14,10 @@ build:
all:
- netgo
- builtinassets
+ - stringlabels
windows:
- builtinassets
+ - stringlabels
flags: -a
ldflags: |
-X github.com/prometheus/common/version.Version={{.Version}}
diff --git a/.yamllint b/.yamllint.yml
similarity index 90%
rename from .yamllint
rename to .yamllint.yml
index 19552574b..955a5a627 100644
--- a/.yamllint
+++ b/.yamllint.yml
@@ -20,5 +20,4 @@ rules:
config/testdata/section_key_dup.bad.yml
line-length: disable
truthy:
- ignore: |
- .github/workflows/*.yml
+ check-keys: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cccbae7dd..d316e84d3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,54 @@
# Changelog
-## 2.43.0-rc.0 / 2023-03-09
+
+## 2.45.0 / 2023-06-23
+
+This release is a LTS (Long-Term Support) release of Prometheus and will
+receive security, documentation and bugfix patches for at least 12 months.
+Please read more about our LTS release cycle at
+<https://prometheus.io/docs/introduction/release-cycle/>.
+
+* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336
+* [FEATURE] Config: Add limits to global config. #12126
+* [FEATURE] Consul SD: Added support for `path_prefix`. #12372
+* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350
+* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262
+* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031
+* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944
+* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055
+* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254
+* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297
+* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185
+* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357
+* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329
+* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245
+* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. #12349
+
+## 2.44.0 / 2023-05-13
+
+This version is built with Go tag `stringlabels`, to use the smaller data
+structure for Labels that was optional in the previous release. For more
+details about this code change see #10991.
+
+* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203
+* [FEATURE] Remote-read: Handle native histograms. #12085, #12192
+* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096
+* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251
+* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326
+* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084
+* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190
+* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270
+* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272
+* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207
+* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135
+* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179
+* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127
+
+## 2.43.1 / 2023-05-03
+
+* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322
+
+## 2.43.0 / 2023-03-21
We are working on some performance improvements in Prometheus, which are only
built into Prometheus when compiling it using the Go tag `stringlabels`
@@ -8,14 +56,14 @@ built into Prometheus when compiling it using the Go tag `stringlabels`
structure for labels that uses a single string to hold all the label/values,
resulting in a smaller heap size and some speedups in most cases. We would like
to encourage users who are interested in these improvements to help us measure
-the gains on their production architecture. Building Prometheus from source
-with the `stringlabels` Go tag and providing feedback on its effectiveness in
-their specific use cases would be incredibly helpful to us. #10991
+the gains on their production architecture. We are providing release artefacts
+`2.43.0+stringlabels` and Docker images tagged `v2.43.0-stringlabels` with those
+improvements for testing. #10991
* [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
-* [FEATURE] Scrape: Add `include_scrape_configs` to include scrape configs from different files. #12019
+* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
* [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098
-* [FEATURE] HTTP client: Add `proxy_from_enviroment` to read proxies from env variables. #12098
+* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098
* [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088
* [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897
* [ENHANCEMENT] Scrape: Allow exemplars for all metric types. #11984
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2fbed3880..57055ef38 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ The PromQL parser grammar is located in `promql/parser/generated_parser.y` and i
The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc)
If doing some sort of debugging, then it is possible to add some verbose output. After generating the parser, then you
-can modify the the `./promql/parser/generated_parser.y.go` manually.
+can modify the `./promql/parser/generated_parser.y.go` manually.
```golang
// As of writing this was somewhere around line 600.
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 103ebdda6..1175bb9a6 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -10,7 +10,7 @@ Julien Pivotto ( / @roidelapluie) and Levi Harrison
* `prometheus-mixin`: Björn Rabenstein ( / @beorn7)
* `storage`
* `remote`: Chris Marchbanks ( / @csmarchbanks), Callum Styan ( / @cstyan), Bartłomiej Płotka ( / @bwplotka), Tom Wilkie ( / @tomwilkie)
-* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka)
+* `tsdb`: Ganesh Vernekar ( / @codesome), Bartłomiej Płotka ( / @bwplotka), Jesús Vázquez ( / @jesusvazquez)
* `agent`: Robert Fratto ( / @rfratto)
* `web`
* `ui`: Julius Volz ( / @juliusv)
diff --git a/Makefile b/Makefile
index 3877ee719..0dd8673af 100644
--- a/Makefile
+++ b/Makefile
@@ -82,7 +82,7 @@ assets-tarball: assets
.PHONY: parser
parser:
@echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell which goyacc))
+ifeq (, $(shell command -v goyacc 2> /dev/null))
@echo "goyacc not installed so skipping"
@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
else
diff --git a/Makefile.common b/Makefile.common
index 6d8007c95..787feff08 100644
--- a/Makefile.common
+++ b/Makefile.common
@@ -49,7 +49,7 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.51.2
+GOLANGCI_LINT_VERSION ?= v1.53.3
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -176,7 +178,7 @@ endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -205,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
diff --git a/README.md b/README.md
index 9becf71aa..8b89bb01e 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste
## Architecture overview
-![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)
+![Architecture overview](documentation/images/architecture.svg)
## Install
diff --git a/RELEASE.md b/RELEASE.md
index 26f72ba62..0d0918191 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -49,7 +49,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
| v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) |
-| v2.45 | 2023-05-31 | **searching for volunteer** |
+| v2.45 LTS | 2023-05-31 | Jesus Vazquez (GitHub: @jesusvazquez) |
+| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.47 | 2023-08-23 | **searching for volunteer** |
+| v2.48 | 2023-10-04 | **searching for volunteer** |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
diff --git a/VERSION b/VERSION
index 1a38590b2..e599014ea 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.43.0-rc.0
+2.45.0
diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index f4f6af20d..3d723f152 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -12,6 +12,7 @@
// limitations under the License.
// The main package for the Prometheus server executable.
+// nolint:revive // Many unused function arguments in this file by design.
package main
import (
@@ -336,6 +337,9 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental.").
Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize)
+ serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
+ Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
+
agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
Default("data-agent/").StringVar(&cfg.agentStoragePath)
@@ -425,7 +429,7 @@ func main() {
_, err := a.Parse(os.Args[1:])
if err != nil {
- fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
+ fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err))
a.Usage(os.Args[1:])
os.Exit(2)
}
@@ -490,7 +494,7 @@ func main() {
if cfgFile.StorageConfig.ExemplarsConfig == nil {
cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig
}
- cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars)
+ cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
}
if cfgFile.StorageConfig.TSDBConfig != nil {
cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
@@ -1049,6 +1053,7 @@ func main() {
startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
localStorage.Set(db, startTimeMargin)
+ db.SetWriteNotified(remoteStorage)
close(dbOpen)
<-cancel
return nil
@@ -1481,11 +1486,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
}
// Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
if x := s.get(); x != nil {
switch db := x.(type) {
case *tsdb.DB:
- return db.Head().Stats(statsByLabelName), nil
+ return db.Head().Stats(statsByLabelName, limit), nil
case *agent.DB:
return nil, agent.ErrUnsupported
default:
@@ -1542,6 +1547,7 @@ type tsdbOptions struct {
NoLockfile bool
WALCompression bool
HeadChunksWriteQueueSize int
+ SamplesPerChunk int
StripeSize int
MinBlockDuration model.Duration
MaxBlockDuration model.Duration
@@ -1562,6 +1568,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
AllowOverlappingCompaction: true,
WALCompression: opts.WALCompression,
HeadChunksWriteQueueSize: opts.HeadChunksWriteQueueSize,
+ SamplesPerChunk: opts.SamplesPerChunk,
StripeSize: opts.StripeSize,
MinBlockDuration: int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
MaxBlockDuration: int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 26d11e21e..21447d036 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -121,7 +121,7 @@ func TestFailedStartupExitCode(t *testing.T) {
fakeInputFile := "fake-input-file"
expectedExitStatus := 2
- prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile)
+ prom := exec.Command(promPath, "-test.main", "--web.listen-address=0.0.0.0:0", "--config.file="+fakeInputFile)
err := prom.Run()
require.Error(t, err)
@@ -358,7 +358,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
}
func TestAgentSuccessfulStartup(t *testing.T) {
- prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
+ prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
require.NoError(t, prom.Start())
actualExitStatus := 0
@@ -376,7 +376,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
}
func TestAgentFailedStartupWithServerFlag(t *testing.T) {
- prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
+ prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
output := bytes.Buffer{}
prom.Stderr = &output
@@ -403,7 +403,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
}
func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
- prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
+ prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
require.NoError(t, prom.Start())
actualExitStatus := 0
@@ -438,7 +438,7 @@ func TestModeSpecificFlags(t *testing.T) {
for _, tc := range testcases {
t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
- args := []string{"-test.main", tc.arg, t.TempDir()}
+ args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
if tc.mode == "agent" {
args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
diff --git a/cmd/prometheus/main_unix_test.go b/cmd/prometheus/main_unix_test.go
index b49110ea9..7224e25d7 100644
--- a/cmd/prometheus/main_unix_test.go
+++ b/cmd/prometheus/main_unix_test.go
@@ -72,9 +72,11 @@ Loop:
if !startedOk {
t.Fatal("prometheus didn't start in the specified timeout")
}
- if err := prom.Process.Kill(); err == nil {
+ switch err := prom.Process.Kill(); {
+ case err == nil:
t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
- } else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
+ case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
+ // TODO: find a better way to detect when the process didn't exit as expected!
t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
}
}
diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
index d5dfbea50..f20f2a22c 100644
--- a/cmd/prometheus/query_log_test.go
+++ b/cmd/prometheus/query_log_test.go
@@ -193,7 +193,7 @@ func (p *queryLogTest) String() string {
}
name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
if p.enabledAtStart {
- name = name + ", enabled at start"
+ name += ", enabled at start"
}
if p.prefix != "" {
name = name + ", with prefix " + p.prefix
diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go
index 3c23d2c03..39410881b 100644
--- a/cmd/promtool/backfill.go
+++ b/cmd/promtool/backfill.go
@@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
nextSampleTs int64 = math.MaxInt64
)
- for t := mint; t <= maxt; t = t + blockDuration {
+ for t := mint; t <= maxt; t += blockDuration {
tsUpper := t + blockDuration
if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
// The next sample is not in this timerange, we can avoid parsing
diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go
index 2c551abeb..e6f7cad31 100644
--- a/cmd/promtool/backfill_test.go
+++ b/cmd/promtool/backfill_test.go
@@ -44,7 +44,7 @@ func sortSamples(samples []backfillSample) {
})
}
-func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
+func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample { // nolint:revive
ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
samples := []backfillSample{}
for ss.Next() {
diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go
index 3988957ef..f94be8b27 100644
--- a/cmd/promtool/main.go
+++ b/cmd/promtool/main.go
@@ -71,6 +71,8 @@ const (
lintOptionAll = "all"
lintOptionDuplicateRules = "duplicate-rules"
lintOptionNone = "none"
+ checkHealth = "/-/healthy"
+ checkReadiness = "/-/ready"
)
var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
@@ -79,6 +81,7 @@ func main() {
var (
httpRoundTripper = api.DefaultRoundTripper
serverURL *url.URL
+ remoteWriteURL *url.URL
httpConfigFilePath string
)
@@ -113,11 +116,19 @@ func main() {
"The config files to check.",
).Required().ExistingFiles()
+ checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.")
+ checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
+
+ checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.")
+ checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
+
checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.")
ruleFiles := checkRulesCmd.Arg(
"rule-files",
- "The rule files to check.",
- ).Required().ExistingFiles()
+ "The rule files to check, default is read from standard input.",
+ ).ExistingFiles()
checkRulesLint := checkRulesCmd.Flag(
"lint",
"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
@@ -168,6 +179,18 @@ func main() {
queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()
+ pushCmd := app.Command("push", "Push to a Prometheus server.")
+ pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("").ExistingFileVar(&httpConfigFilePath)
+ pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
+ pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL)
+ metricFiles := pushMetricsCmd.Arg(
+ "metric-files",
+ "The metric files to push, default is read from standard input.",
+ ).ExistingFiles()
+ pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap()
+ pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration()
+ pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()
+
testCmd := app.Command("test", "Unit testing.")
testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
testRulesFiles := testRulesCmd.Arg(
@@ -276,6 +299,12 @@ func main() {
case checkConfigCmd.FullCommand():
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
+ case checkServerHealthCmd.FullCommand():
+ os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))
+
+ case checkServerReadyCmd.FullCommand():
+ os.Exit(checkErr(CheckServerStatus(serverURL, checkReadiness, httpRoundTripper)))
+
case checkWebConfigCmd.FullCommand():
os.Exit(CheckWebConfig(*webConfigFiles...))
@@ -285,6 +314,9 @@ func main() {
case checkMetricsCmd.FullCommand():
os.Exit(CheckMetrics(*checkMetricsExtended))
+ case pushMetricsCmd.FullCommand():
+ os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...))
+
case queryInstantCmd.FullCommand():
os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p))
@@ -369,6 +401,43 @@ func (ls lintConfig) lintDuplicateRules() bool {
return ls.all || ls.duplicateRules
}
+// CheckServerStatus checks whether the Prometheus server at serverURL reports healthy or ready at checkEndpoint.
+func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
+ if serverURL.Scheme == "" {
+ serverURL.Scheme = "http"
+ }
+
+ config := api.Config{
+ Address: serverURL.String() + checkEndpoint,
+ RoundTripper: roundTripper,
+ }
+
+ // Create new client.
+	c, err := api.NewClient(config)
+	if err != nil {
+		// Wrap instead of printing here: checkErr in main already writes the
+		// returned error to stderr, so printing too would report it twice.
+		return fmt.Errorf("error creating API client: %w", err)
+	}
+
+ request, err := http.NewRequest("GET", config.Address, nil)
+ if err != nil {
+ return err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ response, dataBytes, err := c.Do(ctx, request)
+ if err != nil {
+ return err
+ }
+
+ if response.StatusCode != http.StatusOK {
+ return fmt.Errorf("check failed: URL=%s, status=%d", serverURL, response.StatusCode)
+ }
+
+ fmt.Fprintln(os.Stderr, " SUCCESS: ", string(dataBytes))
+ return nil
+}
+
// CheckConfig validates configuration files.
func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int {
failed := false
@@ -388,20 +457,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
}
fmt.Println()
- for _, rf := range ruleFiles {
- if n, errs := checkRules(rf, lintSettings); len(errs) > 0 {
- fmt.Fprintln(os.Stderr, " FAILED:")
- for _, err := range errs {
- fmt.Fprintln(os.Stderr, " ", err)
- }
- failed = true
- for _, err := range errs {
- hasErrors = hasErrors || !errors.Is(err, lintError)
- }
- } else {
- fmt.Printf(" SUCCESS: %d rules found\n", n)
- }
- fmt.Println()
+ rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings)
+ if rulesFailed {
+ failed = rulesFailed
+ }
+ if rulesHasErrors {
+ hasErrors = rulesHasErrors
}
}
if failed && hasErrors {
@@ -629,9 +690,66 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 func CheckRules(ls lintConfig, files ...string) int {
 	failed := false
 	hasErrors := false
+	if len(files) == 0 {
+		fmt.Println("Checking standard input")
+		data, err := io.ReadAll(os.Stdin)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:", err)
+			return failureExitCode
+		}
+		rgs, errs := rulefmt.Parse(data)
+		if len(errs) > 0 {
+			// Print every parse error before bailing out, not just the first.
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+			}
+			return failureExitCode
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
+			fmt.Fprintln(os.Stderr, "  FAILED:")
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+			}
+			failed = true
+			for _, err := range errs {
+				hasErrors = hasErrors || !errors.Is(err, lintError)
+			}
+		} else {
+			fmt.Printf("  SUCCESS: %d rules found\n", n)
+		}
+		fmt.Println()
+	} else {
+		failed, hasErrors = checkRules(files, ls)
+	}
+	if failed && hasErrors {
+		return failureExitCode
+	}
+	if failed && ls.fatal {
+		return lintErrExitCode
+	}
+
+	return successExitCode
+}
+
+// checkRules validates rule files; it returns (anyFailed, anyHardErrors).
+func checkRules(files []string, ls lintConfig) (bool, bool) {
+	failed := false
+	hasErrors := false
 
 	for _, f := range files {
-		if n, errs := checkRules(f, ls); errs != nil {
+		fmt.Println("Checking", f)
+		rgs, errs := rulefmt.ParseFile(f)
+		if errs != nil {
+			// Parse errors are hard failures; report them rather than silently skipping the file.
+			fmt.Fprintln(os.Stderr, "  FAILED:")
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+			}
+			failed = true
+			hasErrors = true
+			continue
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())
@@ -645,23 +763,10 @@ func CheckRules(ls lintConfig, files ...string) int {
 		}
 		fmt.Println()
 	}
-	if failed && hasErrors {
-		return failureExitCode
-	}
-	if failed && ls.fatal {
-		return lintErrExitCode
-	}
-	return successExitCode
+	return failed, hasErrors
 }
-func checkRules(filename string, lintSettings lintConfig) (int, []error) {
- fmt.Println("Checking", filename)
-
- rgs, errs := rulefmt.ParseFile(filename)
- if errs != nil {
- return successExitCode, errs
- }
-
+func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) {
numRules := 0
for _, rg := range rgs.Groups {
numRules += len(rg.Rules)
diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go
new file mode 100644
index 000000000..2bc2237e2
--- /dev/null
+++ b/cmd/promtool/metrics.go
@@ -0,0 +1,138 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/golang/snappy"
+ config_util "github.com/prometheus/common/config"
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/storage/remote"
+ "github.com/prometheus/prometheus/util/fmtutil"
+)
+
+// PushMetrics pushes metrics read from files (or standard input) to a Prometheus remote-write endpoint (for testing purposes only).
+func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
+ addressURL, err := url.Parse(url.String())
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return failureExitCode
+ }
+
+ // build remote write client
+ writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{
+ URL: &config_util.URL{URL: addressURL},
+ Timeout: model.Duration(timeout),
+ })
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return failureExitCode
+ }
+
+ // set custom tls config from httpConfigFilePath
+ // set custom headers to every request
+ client, ok := writeClient.(*remote.Client)
+ if !ok {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient))
+ return failureExitCode
+ }
+ client.Client.Transport = &setHeadersTransport{
+ RoundTripper: roundTripper,
+ headers: headers,
+ }
+
+ var data []byte
+ var failed bool
+
+ if len(files) == 0 {
+ data, err = io.ReadAll(os.Stdin)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, " FAILED:", err)
+ return failureExitCode
+ }
+ fmt.Printf("Parsing standard input\n")
+ if parseAndPushMetrics(client, data, labels) {
+ fmt.Printf(" SUCCESS: metrics pushed to remote write.\n")
+ return successExitCode
+ }
+ return failureExitCode
+ }
+
+ for _, file := range files {
+ data, err = os.ReadFile(file)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, " FAILED:", err)
+ failed = true
+ continue
+ }
+
+ fmt.Printf("Parsing metrics file %s\n", file)
+ if parseAndPushMetrics(client, data, labels) {
+ fmt.Printf(" SUCCESS: metrics file %s pushed to remote write.\n", file)
+ continue
+ }
+ failed = true
+ }
+
+ if failed {
+ return failureExitCode
+ }
+
+ return successExitCode
+}
+
+func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
+ metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, " FAILED:", err)
+ return false
+ }
+
+ raw, err := metricsData.Marshal()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, " FAILED:", err)
+ return false
+ }
+
+ // Encode the request body into snappy encoding.
+ compressed := snappy.Encode(nil, raw)
+ err = client.Store(context.Background(), compressed)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, " FAILED:", err)
+ return false
+ }
+
+ return true
+}
+
+type setHeadersTransport struct {
+ http.RoundTripper
+ headers map[string]string
+}
+
+func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ for key, value := range s.headers {
+ req.Header.Set(key, value)
+ }
+ return s.RoundTripper.RoundTrip(req)
+}
diff --git a/cmd/promtool/rules.go b/cmd/promtool/rules.go
index 2b5ed1d78..d8d6bb83e 100644
--- a/cmd/promtool/rules.go
+++ b/cmd/promtool/rules.go
@@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que
}
// loadGroups parses groups from a list of recording rule files.
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) {
+func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
if errs != nil {
return errs
@@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
startInMs := start.Unix() * int64(time.Second/time.Millisecond)
endInMs := end.Unix() * int64(time.Second/time.Millisecond)
- for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration {
+ for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
endOfBlock := startOfBlock + blockDuration - 1
currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
@@ -163,7 +163,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
})
lb.Set(labels.MetricName, ruleName)
- lbls := lb.Labels(labels.EmptyLabels())
+ lbls := lb.Labels()
for _, value := range sample.Values {
if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
diff --git a/cmd/promtool/rules_test.go b/cmd/promtool/rules_test.go
index fb582ed0d..213b7d2a0 100644
--- a/cmd/promtool/rules_test.go
+++ b/cmd/promtool/rules_test.go
@@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
samples model.Matrix
}
-func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
+func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) { // nolint:revive
return mockAPI.samples, v1.Warnings{}, nil
}
@@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
}
}
-func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
+func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
logger := log.NewNopLogger()
cfg := ruleImporterConfig{
outputDir: tmpDir,
diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go
index 0e0cdb863..b7fad5fe0 100644
--- a/cmd/promtool/tsdb.go
+++ b/cmd/promtool/tsdb.go
@@ -398,25 +398,20 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
if err != nil {
return nil, nil, err
}
- blocks, err := db.Blocks()
+
+ if blockID == "" {
+ blockID, err = db.LastBlockID()
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := db.Block(blockID)
if err != nil {
return nil, nil, err
}
- var block tsdb.BlockReader
- if blockID != "" {
- for _, b := range blocks {
- if b.Meta().ULID.String() == blockID {
- block = b
- break
- }
- }
- } else if len(blocks) > 0 {
- block = blocks[len(blocks)-1]
- }
- if block == nil {
- return nil, nil, fmt.Errorf("block %s not found", blockID)
- }
- return db, block, nil
+
+ return db, b, nil
}
func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index cc40ac9d0..e934f37c8 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -130,7 +130,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
if err != nil {
return err
}
- if len(m) <= 0 {
+ if len(m) == 0 {
fmt.Fprintln(os.Stderr, " WARNING: no file match pattern", rf)
}
globbedFiles = append(globbedFiles, m...)
@@ -347,7 +347,7 @@ Outer:
for _, s := range got {
gotSamples = append(gotSamples, parsedSample{
Labels: s.Metric.Copy(),
- Value: s.V,
+ Value: s.F,
})
}
@@ -434,7 +434,7 @@ func (tg *testGroup) maxEvalTime() time.Duration {
}
func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
- q, err := engine.NewInstantQuery(qu, nil, qs, t)
+ q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t)
if err != nil {
return nil, err
}
@@ -447,7 +447,8 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q
return v, nil
case promql.Scalar:
return promql.Vector{promql.Sample{
- Point: promql.Point{T: v.T, V: v.V},
+ T: v.T,
+ F: v.V,
Metric: labels.Labels{},
}}, nil
default:
diff --git a/config/config.go b/config/config.go
index a29c98eed..d32fcc33c 100644
--- a/config/config.go
+++ b/config/config.go
@@ -34,6 +34,7 @@ import (
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel"
+ "github.com/prometheus/prometheus/storage/remote/azuread"
)
var (
@@ -146,13 +147,14 @@ var (
// DefaultScrapeConfig is the default scrape configuration.
DefaultScrapeConfig = ScrapeConfig{
- // ScrapeTimeout and ScrapeInterval default to the
- // configured globals.
- MetricsPath: "/metrics",
- Scheme: "http",
- HonorLabels: false,
- HonorTimestamps: true,
- HTTPClientConfig: config.DefaultHTTPClientConfig,
+ // ScrapeTimeout and ScrapeInterval default to the configured
+ // globals.
+ ScrapeClassicHistograms: false,
+ MetricsPath: "/metrics",
+ Scheme: "http",
+ HonorLabels: false,
+ HonorTimestamps: true,
+ HTTPClientConfig: config.DefaultHTTPClientConfig,
}
// DefaultAlertmanagerConfig is the default alertmanager configuration.
@@ -173,16 +175,16 @@ var (
// DefaultQueueConfig is the default remote queue configuration.
DefaultQueueConfig = QueueConfig{
- // With a maximum of 200 shards, assuming an average of 100ms remote write
- // time and 500 samples per batch, we will be able to push 1M samples/s.
- MaxShards: 200,
+ // With a maximum of 50 shards, assuming an average of 100ms remote write
+ // time and 2000 samples per batch, we will be able to push 1M samples/s.
+ MaxShards: 50,
MinShards: 1,
- MaxSamplesPerSend: 500,
+ MaxSamplesPerSend: 2000,
- // Each shard will have a max of 2500 samples pending in its channel, plus the pending
- // samples that have been enqueued. Theoretically we should only ever have about 3000 samples
- // per shard pending. At 200 shards that's 600k.
- Capacity: 2500,
+ // Each shard will have a max of 10,000 samples pending in its channel, plus the pending
+ // samples that have been enqueued. Theoretically we should only ever have about 12,000 samples
+ // per shard pending. At 50 shards that's 600k.
+ Capacity: 10000,
BatchSendDeadline: model.Duration(5 * time.Second),
// Backoff times for retrying a batch of samples on recoverable errors.
@@ -194,7 +196,7 @@ var (
DefaultMetadataConfig = MetadataConfig{
Send: true,
SendInterval: model.Duration(1 * time.Minute),
- MaxSamplesPerSend: 500,
+ MaxSamplesPerSend: 2000,
}
// DefaultRemoteReadConfig is the default remote read configuration.
@@ -266,7 +268,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
for i, scfg := range c.ScrapeConfigs {
// We do these checks for library users that would not call Validate in
// Unmarshal.
- if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+ if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, err
}
@@ -293,7 +295,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
return nil, fileErr(filename, err)
}
for _, scfg := range cfg.ScrapeConfigs {
- if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+ if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, fileErr(filename, err)
}
@@ -342,7 +344,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
// Do global overrides and validate unique names.
jobNames := map[string]struct{}{}
for _, scfg := range c.ScrapeConfigs {
- if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+ if err := scfg.Validate(c.GlobalConfig); err != nil {
return err
}
@@ -389,6 +391,24 @@ type GlobalConfig struct {
QueryLogFile string `yaml:"query_log_file,omitempty"`
// The labels to add to any timeseries that this Prometheus instance scrapes.
ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
+ // An uncompressed response body larger than this many bytes will cause the
+ // scrape to fail. 0 means no limit.
+ BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
+ // More than this many samples post metric-relabeling will cause the scrape to
+ // fail. 0 means no limit.
+ SampleLimit uint `yaml:"sample_limit,omitempty"`
+ // More than this many targets after the target relabeling will cause the
+ // scrapes to fail. 0 means no limit.
+ TargetLimit uint `yaml:"target_limit,omitempty"`
+ // More than this many labels post metric-relabeling will cause the scrape to
+ // fail. 0 means no limit.
+ LabelLimit uint `yaml:"label_limit,omitempty"`
+ // More than this label name length post metric-relabeling will cause the
+ // scrape to fail. 0 means no limit.
+ LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
+ // More than this label value length post metric-relabeling will cause the
+ // scrape to fail. 0 means no limit.
+ LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -467,6 +487,8 @@ type ScrapeConfig struct {
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
// The timeout for scraping targets of this config.
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+ // Whether to scrape a classic histogram that is also exposed as a native histogram.
+ ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
// The HTTP resource path on which to fetch metrics from targets.
MetricsPath string `yaml:"metrics_path,omitempty"`
// The URL scheme with which to fetch metrics from targets.
@@ -475,20 +497,23 @@ type ScrapeConfig struct {
// scrape to fail. 0 means no limit.
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
// More than this many samples post metric-relabeling will cause the scrape to
- // fail.
+ // fail. 0 means no limit.
SampleLimit uint `yaml:"sample_limit,omitempty"`
// More than this many targets after the target relabeling will cause the
- // scrapes to fail.
+ // scrapes to fail. 0 means no limit.
TargetLimit uint `yaml:"target_limit,omitempty"`
// More than this many labels post metric-relabeling will cause the scrape to
- // fail.
+ // fail. 0 means no limit.
LabelLimit uint `yaml:"label_limit,omitempty"`
// More than this label name length post metric-relabeling will cause the
- // scrape to fail.
+ // scrape to fail. 0 means no limit.
LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
// More than this label value length post metric-relabeling will cause the
- // scrape to fail.
+ // scrape to fail. 0 means no limit.
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
+ // More than this many buckets in a native histogram will cause the scrape to
+ // fail.
+ NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -546,25 +571,44 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}
-func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error {
+func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
if c == nil {
return errors.New("empty or null scrape config section")
}
// First set the correct scrape interval, then check that the timeout
// (inferred or explicit) is not greater than that.
if c.ScrapeInterval == 0 {
- c.ScrapeInterval = defaultInterval
+ c.ScrapeInterval = globalConfig.ScrapeInterval
}
if c.ScrapeTimeout > c.ScrapeInterval {
return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
}
if c.ScrapeTimeout == 0 {
- if defaultTimeout > c.ScrapeInterval {
+ if globalConfig.ScrapeTimeout > c.ScrapeInterval {
c.ScrapeTimeout = c.ScrapeInterval
} else {
- c.ScrapeTimeout = defaultTimeout
+ c.ScrapeTimeout = globalConfig.ScrapeTimeout
}
}
+ if c.BodySizeLimit == 0 {
+ c.BodySizeLimit = globalConfig.BodySizeLimit
+ }
+ if c.SampleLimit == 0 {
+ c.SampleLimit = globalConfig.SampleLimit
+ }
+ if c.TargetLimit == 0 {
+ c.TargetLimit = globalConfig.TargetLimit
+ }
+ if c.LabelLimit == 0 {
+ c.LabelLimit = globalConfig.LabelLimit
+ }
+ if c.LabelNameLengthLimit == 0 {
+ c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit
+ }
+ if c.LabelValueLengthLimit == 0 {
+ c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit
+ }
+
return nil
}
@@ -864,6 +908,7 @@ type RemoteWriteConfig struct {
QueueConfig QueueConfig `yaml:"queue_config,omitempty"`
MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"`
SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"`
+ AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@@ -900,8 +945,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
- if httpClientConfigAuthEnabled && c.SigV4Config != nil {
- return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+ if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
+ return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+ }
+
+ if c.SigV4Config != nil && c.AzureADConfig != nil {
+ return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
}
return nil
@@ -922,7 +971,7 @@ func validateHeadersForTracing(headers map[string]string) error {
func validateHeaders(headers map[string]string) error {
for header := range headers {
if strings.ToLower(header) == "authorization" {
- return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter")
+ return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
}
if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
return fmt.Errorf("%s is a reserved header. It must not be changed", header)
diff --git a/config/config_test.go b/config/config_test.go
index 3ee327c5f..d3288cc90 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -68,6 +68,15 @@ func mustParseURL(u string) *config.URL {
return &config.URL{URL: parsed}
}
+const (
+ globBodySizeLimit = 15 * units.MiB
+ globSampleLimit = 1500
+ globTargetLimit = 30
+ globLabelLimit = 30
+ globLabelNameLengthLimit = 200
+ globLabelValueLengthLimit = 200
+)
+
var expectedConf = &Config{
GlobalConfig: GlobalConfig{
ScrapeInterval: model.Duration(15 * time.Second),
@@ -76,6 +85,13 @@ var expectedConf = &Config{
QueryLogFile: "",
ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
+
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
},
RuleFiles: []string{
@@ -165,10 +181,16 @@ var expectedConf = &Config{
{
JobName: "prometheus",
- HonorLabels: true,
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorLabels: true,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -261,11 +283,15 @@ var expectedConf = &Config{
{
JobName: "service-x",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(50 * time.Second),
- ScrapeTimeout: model.Duration(5 * time.Second),
- BodySizeLimit: 10 * units.MiB,
- SampleLimit: 1000,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(50 * time.Second),
+ ScrapeTimeout: model.Duration(5 * time.Second),
+ BodySizeLimit: 10 * units.MiB,
+ SampleLimit: 1000,
+ TargetLimit: 35,
+ LabelLimit: 35,
+ LabelNameLengthLimit: 210,
+ LabelValueLengthLimit: 210,
HTTPClientConfig: config.HTTPClientConfig{
BasicAuth: &config.BasicAuth{
@@ -352,9 +378,15 @@ var expectedConf = &Config{
{
JobName: "service-y",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -363,6 +395,7 @@ var expectedConf = &Config{
ServiceDiscoveryConfigs: discovery.Configs{
&consul.SDConfig{
Server: "localhost:1234",
+ PathPrefix: "/consul",
Token: "mysecret",
Services: []string{"nginx", "cache", "mysql"},
ServiceTags: []string{"canary", "v1"},
@@ -398,9 +431,15 @@ var expectedConf = &Config{
{
JobName: "service-z",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: model.Duration(10 * time.Second),
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: model.Duration(10 * time.Second),
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: "/metrics",
Scheme: "http",
@@ -423,9 +462,15 @@ var expectedConf = &Config{
{
JobName: "service-kubernetes",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -454,9 +499,15 @@ var expectedConf = &Config{
{
JobName: "service-kubernetes-namespaces",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -485,9 +536,15 @@ var expectedConf = &Config{
{
JobName: "service-kuma",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -505,9 +562,15 @@ var expectedConf = &Config{
{
JobName: "service-marathon",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -534,9 +597,15 @@ var expectedConf = &Config{
{
JobName: "service-nomad",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -560,9 +629,15 @@ var expectedConf = &Config{
{
JobName: "service-ec2",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -593,9 +668,15 @@ var expectedConf = &Config{
{
JobName: "service-lightsail",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -616,9 +697,15 @@ var expectedConf = &Config{
{
JobName: "service-azure",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -642,9 +729,15 @@ var expectedConf = &Config{
{
JobName: "service-nerve",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -661,9 +754,15 @@ var expectedConf = &Config{
{
JobName: "0123service-xxx",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -683,9 +782,15 @@ var expectedConf = &Config{
{
JobName: "badfederation",
- HonorTimestamps: false,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: false,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: "/federate",
Scheme: DefaultScrapeConfig.Scheme,
@@ -705,9 +810,15 @@ var expectedConf = &Config{
{
JobName: "測試",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -727,9 +838,15 @@ var expectedConf = &Config{
{
JobName: "httpsd",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -746,9 +863,15 @@ var expectedConf = &Config{
{
JobName: "service-triton",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -773,9 +896,15 @@ var expectedConf = &Config{
{
JobName: "digitalocean-droplets",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -799,9 +928,15 @@ var expectedConf = &Config{
{
JobName: "docker",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -821,9 +956,15 @@ var expectedConf = &Config{
{
JobName: "dockerswarm",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -843,9 +984,15 @@ var expectedConf = &Config{
{
JobName: "service-openstack",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -869,9 +1016,15 @@ var expectedConf = &Config{
{
JobName: "service-puppetdb",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -897,10 +1050,16 @@ var expectedConf = &Config{
},
},
{
- JobName: "hetzner",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ JobName: "hetzner",
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -946,9 +1105,15 @@ var expectedConf = &Config{
{
JobName: "service-eureka",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -965,9 +1130,16 @@ var expectedConf = &Config{
{
JobName: "ovhcloud",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
+
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -994,9 +1166,16 @@ var expectedConf = &Config{
{
JobName: "scaleway",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
+
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1029,9 +1208,15 @@ var expectedConf = &Config{
{
JobName: "linode-instances",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1056,9 +1241,16 @@ var expectedConf = &Config{
{
JobName: "uyuni",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
+
HTTPClientConfig: config.DefaultHTTPClientConfig,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1075,10 +1267,16 @@ var expectedConf = &Config{
},
},
{
- JobName: "ionos",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ JobName: "ionos",
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1100,9 +1298,15 @@ var expectedConf = &Config{
{
JobName: "vultr",
- HonorTimestamps: true,
- ScrapeInterval: model.Duration(15 * time.Second),
- ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ HonorTimestamps: true,
+ ScrapeInterval: model.Duration(15 * time.Second),
+ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
+ BodySizeLimit: globBodySizeLimit,
+ SampleLimit: globSampleLimit,
+ TargetLimit: globTargetLimit,
+ LabelLimit: globLabelLimit,
+ LabelNameLengthLimit: globLabelNameLengthLimit,
+ LabelValueLengthLimit: globLabelValueLengthLimit,
MetricsPath: DefaultScrapeConfig.MetricsPath,
Scheme: DefaultScrapeConfig.Scheme,
@@ -1523,7 +1727,7 @@ var expectedErrors = []struct {
},
{
filename: "remote_write_authorization_header.bad.yml",
- errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
+ errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
},
{
filename: "remote_write_url_missing.bad.yml",
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index 764f1a342..19cfe1eb5 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -2,6 +2,12 @@
global:
scrape_interval: 15s
evaluation_interval: 30s
+ body_size_limit: 15MB
+ sample_limit: 1500
+ target_limit: 30
+ label_limit: 30
+ label_name_length_limit: 200
+ label_value_length_limit: 200
# scrape_timeout is set to the global default (10s).
external_labels:
@@ -111,6 +117,11 @@ scrape_configs:
body_size_limit: 10MB
sample_limit: 1000
+ target_limit: 35
+ label_limit: 35
+ label_name_length_limit: 210
+ label_value_length_limit: 210
+
metrics_path: /my_path
scheme: https
@@ -151,6 +162,7 @@ scrape_configs:
consul_sd_configs:
- server: "localhost:1234"
token: mysecret
+ path_prefix: /consul
services: ["nginx", "cache", "mysql"]
tags: ["canary", "v1"]
node_meta:
diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go
index ca9921159..86d76627e 100644
--- a/discovery/aws/ec2.go
+++ b/discovery/aws/ec2.go
@@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
return d
}
-func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
+func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
if d.ec2 != nil {
return d.ec2, nil
}
diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go
index c59bd1f5d..99ea396b9 100644
--- a/discovery/consul/consul.go
+++ b/discovery/consul/consul.go
@@ -111,6 +111,7 @@ func init() {
// SDConfig is the configuration for Consul service discovery.
type SDConfig struct {
Server string `yaml:"server,omitempty"`
+ PathPrefix string `yaml:"path_prefix,omitempty"`
Token config.Secret `yaml:"token,omitempty"`
Datacenter string `yaml:"datacenter,omitempty"`
Namespace string `yaml:"namespace,omitempty"`
@@ -211,6 +212,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
clientConf := &consul.Config{
Address: conf.Server,
+ PathPrefix: conf.PathPrefix,
Scheme: conf.Scheme,
Datacenter: conf.Datacenter,
Namespace: conf.Namespace,
diff --git a/discovery/dns/dns.go b/discovery/dns/dns.go
index 2b11c242a..96e07254f 100644
--- a/discovery/dns/dns.go
+++ b/discovery/dns/dns.go
@@ -285,21 +285,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
for _, lname := range conf.NameList(name) {
response, err := lookupFromAnyServer(lname, qtype, conf, logger)
- if err != nil {
+ switch {
+ case err != nil:
// We can't go home yet, because a later name
// may give us a valid, successful answer. However
// we can no longer say "this name definitely doesn't
// exist", because we did not get that answer for
// at least one name.
allResponsesValid = false
- } else if response.Rcode == dns.RcodeSuccess {
+ case response.Rcode == dns.RcodeSuccess:
// Outcome 1: GOLD!
return response, nil
}
}
if allResponsesValid {
- // Outcome 2: everyone says NXDOMAIN, that's good enough for me
+ // Outcome 2: everyone says NXDOMAIN, that's good enough for me.
return &dns.Msg{}, nil
}
// Outcome 3: boned.
diff --git a/discovery/file/file.go b/discovery/file/file.go
index c45595c6d..60b63350f 100644
--- a/discovery/file/file.go
+++ b/discovery/file/file.go
@@ -226,8 +226,8 @@ func (d *Discovery) watchFiles() {
panic("no watcher configured")
}
for _, p := range d.paths {
- if idx := strings.LastIndex(p, "/"); idx > -1 {
- p = p[:idx]
+ if dir, _ := filepath.Split(p); dir != "" {
+ p = dir
} else {
p = "./"
}
diff --git a/discovery/hetzner/hcloud.go b/discovery/hetzner/hcloud.go
index aa406a1a7..50afdc1ec 100644
--- a/discovery/hetzner/hcloud.go
+++ b/discovery/hetzner/hcloud.go
@@ -59,7 +59,7 @@ type hcloudDiscovery struct {
}
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
-func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
+func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
d := &hcloudDiscovery{
port: conf.Port,
}
diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go
index 4b7abaf77..496088028 100644
--- a/discovery/hetzner/robot.go
+++ b/discovery/hetzner/robot.go
@@ -51,7 +51,7 @@ type robotDiscovery struct {
}
// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
d := &robotDiscovery{
port: conf.Port,
endpoint: conf.robotEndpoint,
@@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
return d, nil
}
-func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
if err != nil {
return nil, err
diff --git a/discovery/ionos/server.go b/discovery/ionos/server.go
index 8ac363970..a850fbbfb 100644
--- a/discovery/ionos/server.go
+++ b/discovery/ionos/server.go
@@ -60,7 +60,7 @@ type serverDiscovery struct {
datacenterID string
}
-func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
d := &serverDiscovery{
port: conf.Port,
datacenterID: conf.DatacenterID,
diff --git a/discovery/kubernetes/client_metrics.go b/discovery/kubernetes/client_metrics.go
index 3a33e3e8d..b316f7d88 100644
--- a/discovery/kubernetes/client_metrics.go
+++ b/discovery/kubernetes/client_metrics.go
@@ -122,11 +122,11 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer
)
}
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
+func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
}
-func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
+func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) {
clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
}
@@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetr
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
}
-func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
+func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric {
// Retries are not used so the metric is omitted.
return noopMetric{}
}
diff --git a/discovery/kubernetes/endpoints.go b/discovery/kubernetes/endpoints.go
index 039daf4fa..2413dab45 100644
--- a/discovery/kubernetes/endpoints.go
+++ b/discovery/kubernetes/endpoints.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// nolint:revive // Many legitimately empty blocks in this file.
package kubernetes
import (
@@ -304,7 +305,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
}
if e.withNodeMetadata {
- target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+ if addr.NodeName != nil {
+ target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+ } else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" {
+ target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name)
+ }
}
pod := e.resolvePodRef(addr.TargetRef)
@@ -465,5 +470,6 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L
nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
}
+
return tg.Merge(nodeLabelset)
}
diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go
index 91b1b0c67..5aa58bdc4 100644
--- a/discovery/kubernetes/endpoints_test.go
+++ b/discovery/kubernetes/endpoints_test.go
@@ -69,6 +69,24 @@ func makeEndpoints() *v1.Endpoints {
},
},
},
+ {
+ Addresses: []v1.EndpointAddress{
+ {
+ IP: "6.7.8.9",
+ TargetRef: &v1.ObjectReference{
+ Kind: "Node",
+ Name: "barbaz",
+ },
+ },
+ },
+ Ports: []v1.EndpointPort{
+ {
+ Name: "testport",
+ Port: 9002,
+ Protocol: v1.ProtocolTCP,
+ },
+ },
+ },
},
}
}
@@ -106,6 +124,14 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
@@ -398,6 +424,14 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
@@ -466,6 +500,14 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
@@ -484,8 +526,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
metadataConfig := AttachMetadataConfig{Node: true}
- nodeLabels := map[string]string{"az": "us-east1"}
- node := makeNode("foobar", "", "", nodeLabels, nil)
+ nodeLabels1 := map[string]string{"az": "us-east1"}
+ nodeLabels2 := map[string]string{"az": "us-west2"}
+ node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+ node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
@@ -495,7 +539,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
},
},
}
- n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node)
+ n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2)
k8sDiscoveryTest{
discovery: n,
@@ -526,6 +570,17 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ "__meta_kubernetes_node_label_az": "us-west2",
+ "__meta_kubernetes_node_labelpresent_az": "true",
+ "__meta_kubernetes_node_name": "barbaz",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
@@ -541,8 +596,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
}
func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
- nodeLabels := map[string]string{"az": "us-east1"}
- nodes := makeNode("foobar", "", "", nodeLabels, nil)
+ nodeLabels1 := map[string]string{"az": "us-east1"}
+ nodeLabels2 := map[string]string{"az": "us-west2"}
+ node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+ node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
metadataConfig := AttachMetadataConfig{Node: true}
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
@@ -553,13 +610,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
},
},
}
- n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc)
+ n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc)
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
- nodes.Labels["az"] = "eu-central1"
- c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{})
+ node1.Labels["az"] = "eu-central1"
+ c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
@@ -572,7 +629,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
"__meta_kubernetes_endpoint_port_name": "testport",
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "true",
- "__meta_kubernetes_node_label_az": "eu-central1",
+ "__meta_kubernetes_node_label_az": "us-east1",
"__meta_kubernetes_node_labelpresent_az": "true",
"__meta_kubernetes_node_name": "foobar",
},
@@ -588,6 +645,17 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ "__meta_kubernetes_node_label_az": "us-west2",
+ "__meta_kubernetes_node_labelpresent_az": "true",
+ "__meta_kubernetes_node_name": "barbaz",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
@@ -699,6 +767,14 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "ns1",
@@ -815,6 +891,14 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "false",
},
+ {
+ "__address__": "6.7.8.9:9002",
+ "__meta_kubernetes_endpoint_address_target_kind": "Node",
+ "__meta_kubernetes_endpoint_address_target_name": "barbaz",
+ "__meta_kubernetes_endpoint_port_name": "testport",
+ "__meta_kubernetes_endpoint_port_protocol": "TCP",
+ "__meta_kubernetes_endpoint_ready": "true",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "own-ns",
diff --git a/discovery/kubernetes/endpointslice.go b/discovery/kubernetes/endpointslice.go
index 135735154..c7df64252 100644
--- a/discovery/kubernetes/endpointslice.go
+++ b/discovery/kubernetes/endpointslice.go
@@ -190,7 +190,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
}
go func() {
- for e.process(ctx, ch) {
+ for e.process(ctx, ch) { // nolint:revive
}
}()
@@ -300,7 +300,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
}
if port.protocol() != nil {
- target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol()))
+ target[endpointSlicePortProtocolLabel] = lv(*port.protocol())
}
if port.port() != nil {
@@ -339,7 +339,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
}
if e.withNodeMetadata {
- target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+ if ep.targetRef() != nil && ep.targetRef().Kind == "Node" {
+ target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name)
+ } else {
+ target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+ }
}
pod := e.resolvePodRef(ep.targetRef())
diff --git a/discovery/kubernetes/endpointslice_test.go b/discovery/kubernetes/endpointslice_test.go
index f4076b943..8104e3db3 100644
--- a/discovery/kubernetes/endpointslice_test.go
+++ b/discovery/kubernetes/endpointslice_test.go
@@ -90,6 +90,17 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
Serving: boolptr(true),
Terminating: boolptr(true),
},
+ }, {
+ Addresses: []string{"4.5.6.7"},
+ Conditions: v1.EndpointConditions{
+ Ready: boolptr(true),
+ Serving: boolptr(true),
+ Terminating: boolptr(false),
+ },
+ TargetRef: &corev1.ObjectReference{
+ Kind: "Node",
+ Name: "barbaz",
+ },
},
},
}
@@ -130,6 +141,17 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
Serving: boolptr(true),
Terminating: boolptr(true),
},
+ }, {
+ Addresses: []string{"4.5.6.7"},
+ Conditions: v1beta1.EndpointConditions{
+ Ready: boolptr(true),
+ Serving: boolptr(true),
+ Terminating: boolptr(false),
+ },
+ TargetRef: &corev1.ObjectReference{
+ Kind: "Node",
+ Name: "barbaz",
+ },
},
},
}
@@ -183,6 +205,18 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -233,6 +267,17 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -419,6 +464,18 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: map[model.LabelName]model.LabelValue{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -503,6 +560,18 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -576,6 +645,18 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -644,6 +725,18 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -728,6 +821,18 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -747,7 +852,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
metadataConfig := AttachMetadataConfig{Node: true}
- nodeLabels := map[string]string{"az": "us-east1"}
+ nodeLabels1 := map[string]string{"az": "us-east1"}
+ nodeLabels2 := map[string]string{"az": "us-west2"}
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
@@ -757,7 +863,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
},
},
}
- objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc}
+ objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc}
n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
k8sDiscoveryTest{
@@ -804,6 +910,21 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ "__meta_kubernetes_node_label_az": "us-west2",
+ "__meta_kubernetes_node_labelpresent_az": "true",
+ "__meta_kubernetes_node_name": "barbaz",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -821,7 +942,8 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
metadataConfig := AttachMetadataConfig{Node: true}
- nodeLabels := map[string]string{"az": "us-east1"}
+ nodeLabels1 := map[string]string{"az": "us-east1"}
+ nodeLabels2 := map[string]string{"az": "us-west2"}
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
@@ -831,16 +953,17 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
},
},
}
- node := makeNode("foobar", "", "", nodeLabels, nil)
- objs := []runtime.Object{makeEndpointSliceV1(), node, svc}
+ node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+ node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
+ objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc}
n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
k8sDiscoveryTest{
discovery: n,
expectedMaxItems: 2,
afterStart: func() {
- node.Labels["az"] = "us-central1"
- c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
+ node1.Labels["az"] = "us-central1"
+ c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
},
expectedRes: map[string]*targetgroup.Group{
"endpointslice/default/testendpoints": {
@@ -859,7 +982,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
- "__meta_kubernetes_node_label_az": "us-central1",
+ "__meta_kubernetes_node_label_az": "us-east1",
"__meta_kubernetes_node_labelpresent_az": "true",
"__meta_kubernetes_node_name": "foobar",
},
@@ -883,6 +1006,21 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
"__meta_kubernetes_endpointslice_port_name": "testport",
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ "__meta_kubernetes_node_label_az": "us-west2",
+ "__meta_kubernetes_node_labelpresent_az": "true",
+ "__meta_kubernetes_node_name": "barbaz",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -1007,6 +1145,18 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
@@ -1139,6 +1289,18 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
},
+ {
+ "__address__": "4.5.6.7:9000",
+ "__meta_kubernetes_endpointslice_address_target_kind": "Node",
+ "__meta_kubernetes_endpointslice_address_target_name": "barbaz",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
+ "__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+ "__meta_kubernetes_endpointslice_port": "9000",
+ "__meta_kubernetes_endpointslice_port_app_protocol": "http",
+ "__meta_kubernetes_endpointslice_port_name": "testport",
+ "__meta_kubernetes_endpointslice_port_protocol": "TCP",
+ },
},
Labels: model.LabelSet{
"__meta_kubernetes_endpointslice_address_type": "IPv4",
diff --git a/discovery/kubernetes/ingress.go b/discovery/kubernetes/ingress.go
index 8c9249f54..ad47c341a 100644
--- a/discovery/kubernetes/ingress.go
+++ b/discovery/kubernetes/ingress.go
@@ -89,7 +89,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
- for i.process(ctx, ch) {
+ for i.process(ctx, ch) { // nolint:revive
}
}()
diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go
index 0f03e2cdb..e87a1c9b2 100644
--- a/discovery/kubernetes/kubernetes.go
+++ b/discovery/kubernetes/kubernetes.go
@@ -299,12 +299,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
err error
ownNamespace string
)
- if conf.KubeConfig != "" {
+ switch {
+ case conf.KubeConfig != "":
kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
if err != nil {
return nil, err
}
- } else if conf.APIServer.URL == nil {
+ case conf.APIServer.URL == nil:
// Use the Kubernetes provided pod service account
// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
kcfg, err = rest.InClusterConfig()
@@ -324,7 +325,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
}
level.Info(l).Log("msg", "Using pod service account via in-cluster config")
- } else {
+ default:
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
if err != nil {
return nil, err
@@ -760,15 +761,21 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
e, ok := obj.(*apiv1.Endpoints)
if !ok {
- return nil, fmt.Errorf("object is not a pod")
+ return nil, fmt.Errorf("object is not endpoints")
}
var nodes []string
for _, target := range e.Subsets {
for _, addr := range target.Addresses {
- if addr.NodeName == nil {
- continue
+ if addr.TargetRef != nil {
+ switch addr.TargetRef.Kind {
+ case "Pod":
+ if addr.NodeName != nil {
+ nodes = append(nodes, *addr.NodeName)
+ }
+ case "Node":
+ nodes = append(nodes, addr.TargetRef.Name)
+ }
}
- nodes = append(nodes, *addr.NodeName)
}
}
return nodes, nil
@@ -788,17 +795,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
switch e := obj.(type) {
case *disv1.EndpointSlice:
for _, target := range e.Endpoints {
- if target.NodeName == nil {
- continue
+ if target.TargetRef != nil {
+ switch target.TargetRef.Kind {
+ case "Pod":
+ if target.NodeName != nil {
+ nodes = append(nodes, *target.NodeName)
+ }
+ case "Node":
+ nodes = append(nodes, target.TargetRef.Name)
+ }
}
- nodes = append(nodes, *target.NodeName)
}
case *disv1beta1.EndpointSlice:
for _, target := range e.Endpoints {
- if target.NodeName == nil {
- continue
+ if target.TargetRef != nil {
+ switch target.TargetRef.Kind {
+ case "Pod":
+ if target.NodeName != nil {
+ nodes = append(nodes, *target.NodeName)
+ }
+ case "Node":
+ nodes = append(nodes, target.TargetRef.Name)
+ }
}
- nodes = append(nodes, *target.NodeName)
}
default:
return nil, fmt.Errorf("object is not an endpointslice")
diff --git a/discovery/kubernetes/node.go b/discovery/kubernetes/node.go
index 93adf7825..d0a6d2780 100644
--- a/discovery/kubernetes/node.go
+++ b/discovery/kubernetes/node.go
@@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
- for n.process(ctx, ch) {
+ for n.process(ctx, ch) { // nolint:revive
}
}()
@@ -209,7 +209,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
return tg
}
-// nodeAddresses returns the provided node's address, based on the priority:
+// nodeAddress returns the provided node's address, based on the priority:
// 1. NodeInternalIP
// 2. NodeInternalDNS
// 3. NodeExternalIP
diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go
index 396720c22..732cf52ad 100644
--- a/discovery/kubernetes/pod.go
+++ b/discovery/kubernetes/pod.go
@@ -132,7 +132,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
- for p.process(ctx, ch) {
+ for p.process(ctx, ch) { // nolint:revive
}
}()
diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go
index a19f06e7d..40e17679e 100644
--- a/discovery/kubernetes/service.go
+++ b/discovery/kubernetes/service.go
@@ -92,7 +92,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
go func() {
- for s.process(ctx, ch) {
+ for s.process(ctx, ch) { // nolint:revive
}
}()
diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go
index 57c82b72a..13b84e6e3 100644
--- a/discovery/legacymanager/manager_test.go
+++ b/discovery/legacymanager/manager_test.go
@@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
case tgs := <-provUpdates:
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
for _, got := range discoveryManager.allGroups() {
- assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
- return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
- x,
- got,
- expected)
- })
+ assertEqualGroups(t, got, tc.expectedTargets[x])
}
}
}
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
}
}
-func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
t.Helper()
// Need to sort by the groups's source as the received order is not guaranteed.
@@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
if _, ok := tgs[k]; !ok {
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
}
- assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
- return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
- })
+ assertEqualGroups(t, tgs[k], expected.tgs[k])
}
}
}
diff --git a/discovery/legacymanager/registry.go b/discovery/legacymanager/registry.go
index 687f09382..955705394 100644
--- a/discovery/legacymanager/registry.go
+++ b/discovery/legacymanager/registry.go
@@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
oldStr := oldTyp.String()
newStr := newTyp.String()
for i, s := range e.Errors {
- e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+ e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
}
}
return err
diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go
index 0fd0a2c37..12b957514 100644
--- a/discovery/linode/linode.go
+++ b/discovery/linode/linode.go
@@ -249,20 +249,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
if detailedIP.Address != ip.String() {
continue
}
-
- if detailedIP.Public && publicIPv4 == "" {
+ switch {
+ case detailedIP.Public && publicIPv4 == "":
publicIPv4 = detailedIP.Address
if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
publicIPv4RDNS = detailedIP.RDNS
}
- } else if !detailedIP.Public && privateIPv4 == "" {
+ case !detailedIP.Public && privateIPv4 == "":
privateIPv4 = detailedIP.Address
if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
privateIPv4RDNS = detailedIP.RDNS
}
- } else {
+ default:
extraIPs = append(extraIPs, detailedIP.Address)
}
}
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 970168b0f..537160811 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
case tgs := <-provUpdates:
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
for _, got := range discoveryManager.allGroups() {
- assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
- return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
- x,
- got,
- expected)
- })
+ assertEqualGroups(t, got, tc.expectedTargets[x])
}
}
}
@@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
}
}
-func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
+func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
t.Helper()
// Need to sort by the groups's source as the received order is not guaranteed.
@@ -1129,7 +1124,7 @@ type lockStaticConfig struct {
}
func (s lockStaticConfig) Name() string { return "lockstatic" }
-func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) {
+func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
return (lockStaticDiscoverer)(s), nil
}
@@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
if _, ok := tgs[k]; !ok {
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
}
- assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
- return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
- })
+ assertEqualGroups(t, tgs[k], expected.tgs[k])
}
}
}
@@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
// TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
// ApplyConfig happens at the same time as targets update.
-func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
+func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger())
diff --git a/discovery/marathon/marathon.go b/discovery/marathon/marathon.go
index 079f93ad0..cfd3e2c08 100644
--- a/discovery/marathon/marathon.go
+++ b/discovery/marathon/marathon.go
@@ -136,9 +136,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
return nil, err
}
- if len(conf.AuthToken) > 0 {
+ switch {
+ case len(conf.AuthToken) > 0:
rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt)
- } else if len(conf.AuthTokenFile) > 0 {
+ case len(conf.AuthTokenFile) > 0:
rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt)
}
if err != nil {
@@ -400,19 +401,20 @@ func targetsForApp(app *app) []model.LabelSet {
var labels []map[string]string
var prefix string
- if len(app.Container.PortMappings) != 0 {
+ switch {
+ case len(app.Container.PortMappings) != 0:
// In Marathon 1.5.x the "container.docker.portMappings" object was moved
// to "container.portMappings".
ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet())
prefix = portMappingLabelPrefix
- } else if len(app.Container.Docker.PortMappings) != 0 {
+ case len(app.Container.Docker.PortMappings) != 0:
// Prior to Marathon 1.5 the port mappings could be found at the path
// "container.docker.portMappings".
ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet())
prefix = portMappingLabelPrefix
- } else if len(app.PortDefinitions) != 0 {
+ case len(app.PortDefinitions) != 0:
// PortDefinitions deprecates the "ports" array and can be used to specify
// a list of ports with metadata in case a mapping is not required.
ports = make([]uint32, len(app.PortDefinitions))
diff --git a/discovery/nomad/nomad.go b/discovery/nomad/nomad.go
index c8d513039..7013f0737 100644
--- a/discovery/nomad/nomad.go
+++ b/discovery/nomad/nomad.go
@@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
return d, nil
}
-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
opts := &nomad.QueryOptions{
AllowStale: d.allowStale,
}
diff --git a/discovery/ovhcloud/dedicated_server.go b/discovery/ovhcloud/dedicated_server.go
index aeb4eccbb..bb5dadcd7 100644
--- a/discovery/ovhcloud/dedicated_server.go
+++ b/discovery/ovhcloud/dedicated_server.go
@@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string {
return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
}
-func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
client, err := createClient(d.config)
if err != nil {
return nil, err
diff --git a/discovery/ovhcloud/dedicated_server_test.go b/discovery/ovhcloud/dedicated_server_test.go
index 03a01005a..e8ffa4a28 100644
--- a/discovery/ovhcloud/dedicated_server_test.go
+++ b/discovery/ovhcloud/dedicated_server_test.go
@@ -84,7 +84,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("Content-Type", "application/json")
- if string(r.URL.Path) == "/dedicated/server" {
+ if r.URL.Path == "/dedicated/server" {
dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -96,7 +96,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
- if string(r.URL.Path) == "/dedicated/server/abcde" {
+ if r.URL.Path == "/dedicated/server/abcde" {
dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -108,7 +108,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
- if string(r.URL.Path) == "/dedicated/server/abcde/ips" {
+ if r.URL.Path == "/dedicated/server/abcde/ips" {
dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
diff --git a/discovery/ovhcloud/vps.go b/discovery/ovhcloud/vps.go
index 705b42b65..e2d1dee36 100644
--- a/discovery/ovhcloud/vps.go
+++ b/discovery/ovhcloud/vps.go
@@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string {
return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
}
-func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
client, err := createClient(d.config)
if err != nil {
return nil, err
diff --git a/discovery/ovhcloud/vps_test.go b/discovery/ovhcloud/vps_test.go
index 31b30fdfc..b1177f215 100644
--- a/discovery/ovhcloud/vps_test.go
+++ b/discovery/ovhcloud/vps_test.go
@@ -91,7 +91,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set("Content-Type", "application/json")
- if string(r.URL.Path) == "/vps" {
+ if r.URL.Path == "/vps" {
dedicatedServersList, err := os.ReadFile("testdata/vps/vps.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -103,7 +103,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
- if string(r.URL.Path) == "/vps/abc" {
+ if r.URL.Path == "/vps/abc" {
dedicatedServer, err := os.ReadFile("testdata/vps/vps_details.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -115,7 +115,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
return
}
}
- if string(r.URL.Path) == "/vps/abc/ips" {
+ if r.URL.Path == "/vps/abc/ips" {
dedicatedServerIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
diff --git a/discovery/registry.go b/discovery/registry.go
index 8274628c2..13168a07a 100644
--- a/discovery/registry.go
+++ b/discovery/registry.go
@@ -253,7 +253,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
oldStr := oldTyp.String()
newStr := newTyp.String()
for i, s := range e.Errors {
- e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+ e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
}
}
return err
diff --git a/discovery/vultr/vultr.go b/discovery/vultr/vultr.go
index 2f489e7d4..42881d3c1 100644
--- a/discovery/vultr/vultr.go
+++ b/discovery/vultr/vultr.go
@@ -202,10 +202,8 @@ func (d *Discovery) listInstances(ctx context.Context) ([]govultr.Instance, erro
if meta.Links.Next == "" {
break
- } else {
- listOptions.Cursor = meta.Links.Next
- continue
}
+ listOptions.Cursor = meta.Links.Next
}
return instances, nil
diff --git a/discovery/zookeeper/zookeeper.go b/discovery/zookeeper/zookeeper.go
index 308d63a5f..cadff5fd2 100644
--- a/discovery/zookeeper/zookeeper.go
+++ b/discovery/zookeeper/zookeeper.go
@@ -193,7 +193,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}
for _, pathUpdate := range d.pathUpdates {
// Drain event channel in case the treecache leaks goroutines otherwise.
- for range pathUpdate {
+ for range pathUpdate { // nolint:revive
}
}
d.conn.Close()
diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md
index 42d853b85..673e8c048 100644
--- a/docs/command-line/promtool.md
+++ b/docs/command-line/promtool.md
@@ -27,6 +27,7 @@ Tooling for the Prometheus monitoring system.
| check | Check the resources for validity. |
| query | Run query against a Prometheus server. |
| debug | Fetch debug information. |
+| push | Push to a Prometheus server. |
| test | Unit testing. |
| tsdb | Run tsdb commands. |
@@ -130,6 +131,38 @@ Check if the web config files are valid or not.
+##### `promtool check healthy`
+
+Check if the Prometheus server is healthy.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
+| <code class="text-nowrap">--url</code> | The URL for the Prometheus server. | `http://localhost:9090` |
+
+
+
+
+##### `promtool check ready`
+
+Check if the Prometheus server is ready.
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. | |
+| <code class="text-nowrap">--url</code> | The URL for the Prometheus server. | `http://localhost:9090` |
+
+
+
+
##### `promtool check rules`
Check if the rule files are valid or not.
@@ -148,9 +181,9 @@ Check if the rule files are valid or not.
###### Arguments
-| Argument | Description | Required |
-| --- | --- | --- |
-| rule-files | The rule files to check. | Yes |
+| Argument | Description |
+| --- | --- |
+| rule-files | The rule files to check, default is read from standard input. |
@@ -340,6 +373,48 @@ Fetch all debug information.
+### `promtool push`
+
+Push to a Prometheus server.
+
+
+
+#### Flags
+
+| Flag | Description |
+| --- | --- |
+| <code class="text-nowrap">--http.config.file</code> | HTTP client configuration file for promtool to connect to Prometheus. |
+
+
+
+
+##### `promtool push metrics`
+
+Push metrics to a prometheus remote write (for testing purpose only).
+
+
+
+###### Flags
+
+| Flag | Description | Default |
+| --- | --- | --- |
+| <code class="text-nowrap">--label</code> | Label to attach to metrics. Can be specified multiple times. | `job=promtool` |
+| <code class="text-nowrap">--timeout</code> | The time to wait for pushing metrics. | `30s` |
+| <code class="text-nowrap">--header</code> | Prometheus remote write header. | |
+
+
+
+
+###### Arguments
+
+| Argument | Description | Required |
+| --- | --- | --- |
+| remote-write-url | Prometheus remote write url to push metrics. | Yes |
+| metric-files | The metric files to push, default is read from standard input. | |
+
+
+
+
### `promtool test`
Unit testing.
diff --git a/docs/configuration/alerting_rules.md b/docs/configuration/alerting_rules.md
index 74f6c02b1..3c1ec84f0 100644
--- a/docs/configuration/alerting_rules.md
+++ b/docs/configuration/alerting_rules.md
@@ -32,7 +32,11 @@ groups:
```
The optional `for` clause causes Prometheus to wait for a certain duration
-between first encountering a new expression output vector element and counting an alert as firing for this element. In this case, Prometheus will check that the alert continues to be active during each evaluation for 10 minutes before firing the alert. Elements that are active, but not firing yet, are in the pending state.
+between first encountering a new expression output vector element and counting
+an alert as firing for this element. In this case, Prometheus will check that
+the alert continues to be active during each evaluation for 10 minutes before
+firing the alert. Elements that are active, but not firing yet, are in the pending state.
+Alerting rules without the `for` clause will become active on the first evaluation.
The `labels` clause allows specifying a set of additional labels to be attached
to the alert. Any existing conflicting labels will be overwritten. The label
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 415ad4a27..b094bb4ec 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -73,6 +73,39 @@ global:
# Reloading the configuration will reopen the file.
[ query_log_file: ]
+ # An uncompressed response body larger than this many bytes will cause the
+ # scrape to fail. 0 means no limit. Example: 100MB.
+ # This is an experimental feature, this behaviour could
+ # change or be removed in the future.
+ [ body_size_limit: | default = 0 ]
+
+ # Per-scrape limit on number of scraped samples that will be accepted.
+ # If more than this number of samples are present after metric relabeling
+ # the entire scrape will be treated as failed. 0 means no limit.
+ [ sample_limit: | default = 0 ]
+
+ # Per-scrape limit on number of labels that will be accepted for a sample. If
+ # more than this number of labels are present post metric-relabeling, the
+ # entire scrape will be treated as failed. 0 means no limit.
+ [ label_limit: | default = 0 ]
+
+ # Per-scrape limit on the length of label names that will be accepted for a sample.
+ # If a label name is longer than this number post metric-relabeling, the entire
+ # scrape will be treated as failed. 0 means no limit.
+ [ label_name_length_limit: | default = 0 ]
+
+ # Per-scrape limit on the length of label values that will be accepted for a sample.
+ # If a label value is longer than this number post metric-relabeling, the
+ # entire scrape will be treated as failed. 0 means no limit.
+ [ label_value_length_limit: | default = 0 ]
+
+ # Per-scrape config limit on number of unique targets that will be
+ # accepted. If more than this number of targets are present after target
+ # relabeling, Prometheus will mark the targets as failed without scraping them.
+ # 0 means no limit. This is an experimental feature, this behaviour could
+ # change in the future.
+ [ target_limit: | default = 0 ]
+
# Rule files specifies a list of globs. Rules and alerts are read from
# all matching files.
rule_files:
@@ -134,6 +167,10 @@ job_name:
# Per-scrape timeout when scraping this job.
[ scrape_timeout: | default = ]
+# Whether to scrape a classic histogram that is also exposed as a native
+# histogram (has no effect without --enable-feature=native-histograms).
+[ scrape_classic_histograms: | default = false ]
+
# The HTTP resource path on which to fetch metrics from targets.
[ metrics_path: | default = /metrics ]
@@ -205,7 +242,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# Configures the scrape request's TLS settings.
tls_config:
@@ -218,7 +255,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -350,6 +387,7 @@ metric_relabel_configs:
# This is an experimental feature, this behaviour could
# change or be removed in the future.
[ body_size_limit: | default = 0 ]
+
# Per-scrape limit on number of scraped samples that will be accepted.
# If more than this number of samples are present after metric relabeling
# the entire scrape will be treated as failed. 0 means no limit.
@@ -376,6 +414,11 @@ metric_relabel_configs:
# 0 means no limit. This is an experimental feature, this behaviour could
# change in the future.
[ target_limit: | default = 0 ]
+
+# Limit on total number of positive and negative buckets allowed in a single
+# native histogram. If this is exceeded, the entire scrape will be treated as
+# failed. 0 means no limit.
+[ native_histogram_bucket_limit: | default = 0 ]
```
Where `` must be unique across all scrape configurations.
@@ -385,11 +428,16 @@ Where `` must be unique across all scrape configurations.
A `tls_config` allows configuring TLS connections.
```yaml
-# CA certificate to validate API server certificate with.
+# CA certificate to validate API server certificate with. At most one of ca and ca_file is allowed.
+[ ca: ]
[ ca_file: ]
-# Certificate and key files for client cert authentication to the server.
+# Certificate and key for client cert authentication to the server.
+# At most one of cert and cert_file is allowed.
+# At most one of key and key_file is allowed.
+[ cert: ]
[ cert_file: ]
+[ key: ]
[ key_file: ]
# ServerName extension to indicate the name of the server.
@@ -447,7 +495,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -535,7 +583,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -544,7 +592,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -576,6 +624,8 @@ The following meta labels are available on targets during [relabeling](#relabel_
# The information to access the Consul API. It is to be defined
# as the Consul documentation requires.
[ server: | default = "localhost:8500" ]
+# Prefix for URIs when Consul is behind an API gateway (reverse proxy).
+[ path_prefix: ]
[ token: ]
[ datacenter: ]
# Namespaces are only supported in Consul Enterprise.
@@ -646,7 +696,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -655,7 +705,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -733,7 +783,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -742,7 +792,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -791,7 +841,7 @@ host:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -849,7 +899,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
```
@@ -966,7 +1016,7 @@ host:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -1026,7 +1076,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
```
@@ -1173,7 +1223,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -1182,7 +1232,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -1448,7 +1498,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -1457,7 +1507,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
```
See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml)
@@ -1665,7 +1715,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -1674,7 +1724,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -1759,7 +1809,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -1768,7 +1818,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -1842,7 +1892,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -1851,7 +1901,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -2067,7 +2117,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2076,7 +2126,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -2116,7 +2166,7 @@ attach_metadata:
See [this example Prometheus configuration file](/documentation/examples/prometheus-kubernetes.yml)
for a detailed example of configuring Prometheus for Kubernetes.
-You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator),
+You may wish to check out the 3rd party [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator),
which automates the Prometheus setup on top of Kubernetes.
### ``
@@ -2153,7 +2203,7 @@ server:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2193,7 +2243,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
```
The [relabeling phase](#relabel_config) is the preferred and more powerful way
@@ -2280,7 +2330,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2289,7 +2339,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -2361,7 +2411,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2370,7 +2420,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -2456,7 +2506,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration for connecting to marathon servers
tls_config:
@@ -2469,7 +2519,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2567,7 +2617,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2576,7 +2626,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -2753,7 +2803,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2762,7 +2812,7 @@ tls_config:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# Refresh interval to re-read the app instance list.
[ refresh_interval: | default = 30s ]
@@ -2869,7 +2919,7 @@ tags_filter:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# Optional proxy URL.
[ proxy_url: ]
@@ -2878,7 +2928,7 @@ tags_filter:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2954,7 +3004,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -2963,7 +3013,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -3036,7 +3086,7 @@ oauth2:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -3045,7 +3095,7 @@ oauth2:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# TLS configuration.
tls_config:
@@ -3238,7 +3288,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -3247,7 +3297,7 @@ tls_config:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# List of Azure service discovery configurations.
azure_sd_configs:
@@ -3422,7 +3472,7 @@ authorization:
[ credentials_file: ]
# Optionally configures AWS's Signature Verification 4 signing process to
-# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# sign requests. Cannot be set at the same time as basic_auth, authorization, oauth2, or azuread.
# To use the default credentials from the AWS SDK, use `sigv4: {}`.
sigv4:
# The AWS region. If blank, the region from the default credentials chain
@@ -3441,10 +3491,20 @@ sigv4:
[ role_arn: ]
# Optional OAuth 2.0 configuration.
-# Cannot be used at the same time as basic_auth, authorization, or sigv4.
+# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
oauth2:
[ ]
+# Optional AzureAD configuration.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
+azuread:
+ # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
+ [ cloud: | default = AzurePublic ]
+
+ # Azure User-assigned Managed identity.
+ [ managed_identity:
+ [ client_id: ]
+
# Configures the remote write request's TLS settings.
tls_config:
[ ]
@@ -3456,7 +3516,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -3465,7 +3525,7 @@ tls_config:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# Configures the queue used to write to remote storage.
queue_config:
@@ -3569,7 +3629,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default: false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -3578,7 +3638,7 @@ tls_config:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default: true ]
# Whether to use the external labels as selectors for the remote read endpoint.
[ filter_external_labels: | default = true ]
diff --git a/docs/configuration/https.md b/docs/configuration/https.md
index 0513612f5..bc83e07a3 100644
--- a/docs/configuration/https.md
+++ b/docs/configuration/https.md
@@ -44,6 +44,13 @@ tls_server_config:
# CA certificate for client certificate authentication to the server.
[ client_ca_file: ]
+ # Verify that the client certificate has a Subject Alternative Name (SAN)
+ # which is an exact match to an entry in this list, else terminate the
+ # connection. SAN match can be one or multiple of the following: DNS,
+ # IP, e-mail, or URI address from https://pkg.go.dev/crypto/x509#Certificate.
+ [ client_allowed_sans:
+ [ -