mirror of https://github.com/prometheus/prometheus

commit f69e4590fb (branch: pull/9410/head)
Merge remote-tracking branch 'upstream/main' into merge-release-2.30 and upgrade prometheus/common to v0.31.1
Signed-off-by: Ganesh Vernekar <ganeshvern@gmail.com>
@@ -47,30 +47,24 @@ jobs:
       - store_test_results:
           path: test-results

-  test_react:
+  test_ui:
     executor: golang

     steps:
       - checkout
       - restore_cache:
           keys:
-            - v3-npm-deps-{{ checksum "web/ui/react-app/package-lock.json" }}
+            - v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
             - v3-npm-deps-
-      - run:
-          command: make react-app-test
+      - run: make ui-install
+      - run: make ui-lint
+      - run: make ui-build-module
+      - run: make ui-test
       - save_cache:
-          key: v3-npm-deps-{{ checksum "web/ui/react-app/package-lock.json" }}
+          key: v3-npm-deps-{{ checksum "web/ui/package-lock.json" }}
           paths:
             - ~/.npm

-  test_web_module:
-    executor: golang
-    steps:
-      - checkout
-      - run: make web-module-install
-      - run: make web-module-test
-      - run: make web-module-lint
-
   test_windows:
     executor:
       name: win/default
@@ -126,6 +120,7 @@ jobs:
     steps:
       - checkout
       - run: ./scripts/sync_repo_files.sh
+      - run: ./scripts/sync_codemirror.sh

 workflows:
   version: 2
@@ -135,11 +130,7 @@ workflows:
           filters:
             tags:
               only: /.*/
-      - test_react:
-          filters:
-            tags:
-              only: /.*/
-      - test_web_module:
+      - test_ui:
           filters:
             tags:
               only: /.*/
@@ -165,7 +156,7 @@ workflows:
           context: org-context
           requires:
             - test_go
-            - test_react
+            - test_ui
             - build
           filters:
             branches:
@@ -175,7 +166,7 @@ workflows:
           context: org-context
           requires:
             - test_go
-            - test_react
+            - test_ui
             - build
           filters:
             tags:
@@ -4,3 +4,4 @@
 /discovery/kubernetes @brancz
 /tsdb @codesome
 /promql @codesome @roidelapluie
+/cmd/promtool @jessicagreben @dgl
@@ -0,0 +1,29 @@
+name: golangci-lint
+on:
+  push:
+    paths:
+      - "go.sum"
+      - "go.mod"
+      - "**.go"
+      - "scripts/errcheck_excludes.txt"
+      - ".github/workflows/golangci-lint.yml"
+  pull_request:
+    paths:
+      - "go.sum"
+      - "go.mod"
+      - "**.go"
+      - "scripts/errcheck_excludes.txt"
+      - ".github/workflows/golangci-lint.yml"
+
+jobs:
+  golangci:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: v1.42.0
@@ -25,3 +25,5 @@ npm_licenses.tar.bz2

 /vendor
 /.build
+
+/**/node_modules
@@ -0,0 +1,7 @@
+FROM gitpod/workspace-full
+
+ENV CUSTOM_NODE_VERSION=16
+
+RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
+
+RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
@@ -1,4 +1,5 @@
----
+image:
+  file: .gitpod.Dockerfile
 tasks:
   - init: |
       make build
@@ -6,7 +7,7 @@ tasks:
       gp sync-done build
       ./prometheus --config.file=documentation/examples/prometheus.yml
   - command: |
-      cd web/ui/react-app
+      cd web/ui/
       gp sync-await build
       unset BROWSER
       export DANGEROUSLY_DISABLE_HOST_CHECK=true
@@ -24,3 +24,4 @@ rules:
     .github/workflows/funcbench.yml
     .github/workflows/fuzzing.yml
     .github/workflows/prombench.yml
+    .github/workflows/golangci-lint.yml
@@ -52,7 +52,7 @@ All our issues are regularly tagged so that you can also filter down the issues

 * Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).

-* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://web.libera.chat/?channels=#prometheus) on irc.libera.chat (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
+* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on the IRC channel [#prometheus-dev](https://web.libera.chat/?channels=#prometheus-dev) on irc.libera.chat (for the easiest start, [join via Element](https://app.element.io/#/room/#prometheus-dev:matrix.org)).

 * Add tests relevant to the fixed bug or new feature.
@@ -64,10 +64,10 @@ To add or update a new dependency, use the `go get` command:

 ```bash
 # Pick the latest tagged release.
-go get example.com/some/module/pkg
+go install example.com/some/module/pkg@latest

 # Pick a specific version.
-go get example.com/some/module/pkg@vX.Y.Z
+go install example.com/some/module/pkg@vX.Y.Z
 ```

 Tidy up the `go.mod` and `go.sum` files:
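(The hunk cuts off right after the "Tidy up" sentence; in the repository that sentence is followed by the usual tidy-up command. A sketch of what comes next, not shown in this diff:

    go mod tidy
)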
@@ -1,7 +1,7 @@
 Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) is the main/default maintainer, some parts of the codebase have other maintainers:

 * `cmd`
-  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
+  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessica.greben1@gmail.com> / @jessicagreben)
 * `discovery`
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
Makefile
@@ -14,13 +14,9 @@
 # Needs to be defined before including Makefile.common to auto-generate targets
 DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x

-WEB_MODULE_PATH = web/ui/module
-REACT_APP_PATH = web/ui/react-app
-REACT_APP_SOURCE_FILES = $(shell find $(REACT_APP_PATH)/public/ $(REACT_APP_PATH)/src/ $(REACT_APP_PATH)/tsconfig.json)
-REACT_APP_OUTPUT_DIR = web/ui/static/react
-REACT_APP_NODE_MODULES_PATH = $(REACT_APP_PATH)/node_modules
+UI_PATH = web/ui
+UI_NODE_MODULES_PATH = $(UI_PATH)/node_modules
 REACT_APP_NPM_LICENSES_TARBALL = "npm_licenses.tar.bz2"
-REACT_APP_BUILD_SCRIPT = ./scripts/build_react_app.sh

 PROMTOOL = ./promtool
 TSDB_BENCHMARK_NUM_METRICS ?= 1000
@@ -33,15 +29,28 @@ include Makefile.common

 DOCKER_IMAGE_NAME ?= prometheus

-$(REACT_APP_NODE_MODULES_PATH): $(REACT_APP_PATH)/package.json $(REACT_APP_PATH)/package-lock.json
-	cd $(REACT_APP_PATH) && npm ci
+.PHONY: ui-install
+ui-install:
+	cd $(UI_PATH) && npm install

-$(REACT_APP_OUTPUT_DIR): $(REACT_APP_NODE_MODULES_PATH) $(REACT_APP_SOURCE_FILES) $(REACT_APP_BUILD_SCRIPT)
-	@echo ">> building React app"
-	@$(REACT_APP_BUILD_SCRIPT)
+.PHONY: ui-build
+ui-build:
+	cd $(UI_PATH) && npm run build
+
+.PHONY: ui-build-module
+ui-build-module:
+	cd $(UI_PATH) && npm run build:module
+
+.PHONY: ui-test
+ui-test:
+	cd $(UI_PATH) && npm run test:coverage
+
+.PHONY: ui-lint
+ui-lint:
+	cd $(UI_PATH) && npm run lint

 .PHONY: assets
-assets: web-module-install web-module-build $(REACT_APP_OUTPUT_DIR)
+assets: ui-install ui-build
	@echo ">> writing assets"
	# Un-setting GOOS and GOARCH here because the generated Go code is always the same,
	# but the cached object code is incompatible between architectures and OSes (which
@@ -49,52 +58,20 @@ assets: web-module-install web-module-build $(REACT_APP_OUTPUT_DIR)
	cd web/ui && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS)
	@$(GOFMT) -w ./web/ui

-.PHONY: react-app-lint
-react-app-lint:
-	@echo ">> running React app linting"
-	cd $(REACT_APP_PATH) && npm run lint:ci
-
-.PHONY: react-app-lint-fix
-react-app-lint-fix:
-	@echo ">> running React app linting and fixing errors where possible"
-	cd $(REACT_APP_PATH) && npm run lint
-
-.PHONY: react-app-test
-react-app-test: | $(REACT_APP_NODE_MODULES_PATH) react-app-lint
-	@echo ">> running React app tests"
-	cd $(REACT_APP_PATH) && npm run test --no-watch --coverage
-
-.PHONY: web-module-build
-web-module-build:
-	@cd ${WEB_MODULE_PATH} && ./build.sh --build
-
-.PHONY: web-module-lint
-web-module-lint:
-	@cd ${WEB_MODULE_PATH} && ./build.sh --lint
-
-.PHONY: web-module-test
-web-module-test:
-	@cd ${WEB_MODULE_PATH} && ./build.sh --test
-
-.PHONY: web-module-install
-web-module-install:
-	@cd ${WEB_MODULE_PATH} && ./build.sh --install
-
 .PHONY: test
 # If we only want to only test go code we have to change the test target
 # which is called by all.
 ifeq ($(GO_ONLY),1)
 test: common-test
 else
-test: common-test react-app-test web-module-test web-module-lint
+test: common-test ui-build-module ui-test ui-lint
 endif


 .PHONY: npm_licenses
-npm_licenses: $(REACT_APP_NODE_MODULES_PATH)
+npm_licenses: ui-install
	@echo ">> bundling npm licenses"
	rm -f $(REACT_APP_NPM_LICENSES_TARBALL)
-	find $(REACT_APP_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-
+	find $(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --transform 's/^/npm_licenses\//' --files-from=-

 .PHONY: tarball
 tarball: npm_licenses common-tarball
@@ -83,12 +83,18 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_

 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.39.0
+GOLANGCI_LINT_VERSION ?= v1.42.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
-		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
	endif
 endif

@@ -55,7 +55,7 @@ Prometheus will now be reachable at http://localhost:9090/.

 ### Building from source

-To build Prometheus from source code, first ensure that have a working
+To build Prometheus from source code, first ensure that you have a working
 Go environment with [version 1.14 or greater installed](https://golang.org/doc/install).
 You also need [Node.js](https://nodejs.org/) and [npm](https://www.npmjs.com/)
 installed in order to build the frontend assets.
@@ -137,6 +137,7 @@ func main() {
	analyzePath := tsdbAnalyzeCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
	analyzeBlockID := tsdbAnalyzeCmd.Arg("block id", "Block to analyze (default is the last block).").String()
	analyzeLimit := tsdbAnalyzeCmd.Flag("limit", "How many items to show in each list.").Default("20").Int()
+	analyzeRunExtended := tsdbAnalyzeCmd.Flag("extended", "Run extended analysis.").Bool()

	tsdbListCmd := tsdbCmd.Command("list", "List tsdb blocks.")
	listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@@ -237,7 +238,7 @@ func main() {
		os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))

	case tsdbAnalyzeCmd.FullCommand():
-		os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit)))
+		os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended)))

	case tsdbListCmd.FullCommand():
		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
@@ -418,7 +418,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
	return db, block, nil
 }

-func analyzeBlock(path, blockID string, limit int) error {
+func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
	db, block, err := openBlock(path, blockID)
	if err != nil {
		return err
@@ -564,7 +564,11 @@ func analyzeBlock(path, blockID string, limit int) error {
	fmt.Printf("\nHighest cardinality metric names:\n")
	printInfo(postingInfos)

-	return analyzeCompaction(block, ir)
+	if runExtended {
+		return analyzeCompaction(block, ir)
+	}
+
+	return nil
 }

 func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err error) {
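(Taken together, the three promtool hunks above gate the compaction analysis behind a new flag: `analyzeRunExtended` is declared on the `tsdb analyze` command, threaded through `analyzeBlock`, and checked before calling `analyzeCompaction`. A minimal usage sketch — the data directory path is illustrative:

    # Default analysis of the newest block in the given TSDB directory.
    ./promtool tsdb analyze /path/to/data

    # Also run the extended (compaction) analysis enabled by this change.
    ./promtool tsdb analyze --extended /path/to/data
)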
@@ -45,6 +45,7 @@ import (
	"github.com/prometheus/prometheus/discovery/marathon"
	"github.com/prometheus/prometheus/discovery/moby"
	"github.com/prometheus/prometheus/discovery/openstack"
+	"github.com/prometheus/prometheus/discovery/puppetdb"
	"github.com/prometheus/prometheus/discovery/scaleway"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/discovery/triton"
@@ -790,6 +791,34 @@ var expectedConf = &Config{
			}},
		},
	},
+	{
+		JobName: "service-puppetdb",
+
+		HonorTimestamps: true,
+		ScrapeInterval:  model.Duration(15 * time.Second),
+		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+
+		MetricsPath:      DefaultScrapeConfig.MetricsPath,
+		Scheme:           DefaultScrapeConfig.Scheme,
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+
+		ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
+			URL:               "https://puppetserver/",
+			Query:             "resources { type = \"Package\" and title = \"httpd\" }",
+			IncludeParameters: true,
+			Port:              80,
+			RefreshInterval:   model.Duration(60 * time.Second),
+			HTTPClientConfig: config.HTTPClientConfig{
+				FollowRedirects: true,
+				TLSConfig: config.TLSConfig{
+					CAFile:   "testdata/valid_ca_file",
+					CertFile: "testdata/valid_cert_file",
+					KeyFile:  "testdata/valid_key_file",
+				},
+			},
+		}},
+	},
	{
		JobName: "hetzner",
		HonorTimestamps: true,
@@ -1262,6 +1291,22 @@ var expectedErrors = []struct {
		filename: "empty_static_config.bad.yml",
		errMsg:   "empty or null section in static_configs",
	},
+	{
+		filename: "puppetdb_no_query.bad.yml",
+		errMsg:   "query missing",
+	},
+	{
+		filename: "puppetdb_no_url.bad.yml",
+		errMsg:   "URL is missing",
+	},
+	{
+		filename: "puppetdb_bad_url.bad.yml",
+		errMsg:   "host is missing in URL",
+	},
+	{
+		filename: "puppetdb_no_scheme.bad.yml",
+		errMsg:   "URL scheme must be 'http' or 'https'",
+	},
	{
		filename: "hetzner_role.bad.yml",
		errMsg:   "unknown role",
@@ -307,6 +307,18 @@ scrape_configs:
       cert_file: valid_cert_file
       key_file: valid_key_file

+  - job_name: service-puppetdb
+    puppetdb_sd_configs:
+      - url: https://puppetserver/
+        query: 'resources { type = "Package" and title = "httpd" }'
+        include_parameters: true
+        port: 80
+        refresh_interval: 1m
+        tls_config:
+          ca_file: valid_ca_file
+          cert_file: valid_cert_file
+          key_file: valid_key_file
+
   - job_name: hetzner
     hetzner_sd_configs:
       - role: hcloud
@@ -0,0 +1,4 @@
+scrape_configs:
+  - puppetdb_sd_configs:
+      - url: http://
+        query: 'resources { type = "Package" and title = "httpd" }'
@@ -0,0 +1,3 @@
+scrape_configs:
+  - puppetdb_sd_configs:
+      - url: http://puppetserver/
@@ -0,0 +1,4 @@
+scrape_configs:
+  - puppetdb_sd_configs:
+      - url: ftp://puppet
+        query: 'resources { type = "Package" and title = "httpd" }'
@@ -0,0 +1,3 @@
+scrape_configs:
+  - puppetdb_sd_configs:
+      - query: 'resources { type = "Package" and title = "httpd" }'
@@ -199,7 +199,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
		logger = log.NewNopLogger()
	}

-	wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithHTTP2Disabled(), config.WithIdleConnTimeout(2*watchTimeout))
+	wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
	if err != nil {
		return nil, err
	}
@@ -108,7 +108,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
		port: conf.Port,
	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "digitalocean_sd")
	if err != nil {
		return nil, err
	}
@@ -118,7 +118,7 @@ type Discovery struct {

 // NewDiscovery creates a new Eureka discovery for the given role.
 func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
	if err != nil {
		return nil, err
	}
@@ -64,7 +64,7 @@ func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, er
		port: conf.Port,
	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
	if err != nil {
		return nil, err
	}
@@ -59,7 +59,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, erro
		endpoint: conf.robotEndpoint,
	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "hetzner_sd")
	if err != nil {
		return nil, err
	}
@@ -113,7 +113,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
		logger = log.NewNopLogger()
	}

-	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", config.WithHTTP2Disabled())
+	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
	if err != nil {
		return nil, err
	}
@@ -31,6 +31,7 @@ import (
	_ "github.com/prometheus/prometheus/discovery/marathon"  // register marathon
	_ "github.com/prometheus/prometheus/discovery/moby"      // register moby
	_ "github.com/prometheus/prometheus/discovery/openstack" // register openstack
+	_ "github.com/prometheus/prometheus/discovery/puppetdb"  // register puppetdb
	_ "github.com/prometheus/prometheus/discovery/scaleway"  // register scaleway
	_ "github.com/prometheus/prometheus/discovery/triton"    // register triton
	_ "github.com/prometheus/prometheus/discovery/xds"       // register xds
@@ -283,7 +283,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
		}
		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
	} else {
-		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd", config.WithHTTP2Disabled())
+		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
		if err != nil {
			return nil, err
		}
@@ -132,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
		eventPollingEnabled: true,
	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "linode_sd")
	if err != nil {
		return nil, err
	}
@@ -131,7 +131,7 @@ type Discovery struct {

 // NewDiscovery returns a new Marathon Discovery.
 func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
	if err != nil {
		return nil, err
	}
@@ -142,7 +142,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger) (*DockerDiscove
	// unix, which are not supported by the HTTP client. Passing HTTP client
	// options to the Docker client makes those non-HTTP requests fail.
	if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
-		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd", config.WithHTTP2Disabled())
+		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "docker_sd")
		if err != nil {
			return nil, err
		}
@@ -146,7 +146,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger) (*Discovery, err
	// unix, which are not supported by the HTTP client. Passing HTTP client
	// options to the Docker client makes those non-HTTP requests fail.
	if hostURL.Scheme == "http" || hostURL.Scheme == "https" {
-		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd", config.WithHTTP2Disabled())
+		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "dockerswarm_sd")
		if err != nil {
			return nil, err
		}
@@ -0,0 +1,49 @@
+[
+  {
+    "certname": "edinburgh.example.com",
+    "environment": "prod",
+    "exported": false,
+    "file": "/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp",
+    "line": 384,
+    "parameters": {
+      "access_log": true,
+      "access_log_file": "ssl_access_log",
+      "additional_includes": [ ],
+      "directoryindex": "",
+      "docroot": "/var/www/html",
+      "ensure": "absent",
+      "options": [
+        "Indexes",
+        "FollowSymLinks",
+        "MultiViews"
+      ],
+      "php_flags": { },
+      "labels": {
+        "alias": "edinburgh"
+      },
+      "scriptaliases": [
+        {
+          "alias": "/cgi-bin",
+          "path": "/var/www/cgi-bin"
+        }
+      ]
+    },
+    "resource": "49af83866dc5a1518968b68e58a25319107afe11",
+    "tags": [
+      "roles::hypervisor",
+      "apache",
+      "apache::vhost",
+      "class",
+      "default-ssl",
+      "profile_hypervisor",
+      "vhost",
+      "profile_apache",
+      "hypervisor",
+      "__node_regexp__edinburgh",
+      "roles",
+      "node"
+    ],
+    "title": "default-ssl",
+    "type": "Apache::Vhost"
+  }
+]
@@ -0,0 +1,252 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package puppetdb
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/version"
+
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/refresh"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+const (
+	pdbLabel            = model.MetaLabelPrefix + "puppetdb_"
+	pdbLabelCertname    = pdbLabel + "certname"
+	pdbLabelResource    = pdbLabel + "resource"
+	pdbLabelType        = pdbLabel + "type"
+	pdbLabelTitle       = pdbLabel + "title"
+	pdbLabelExported    = pdbLabel + "exported"
+	pdbLabelTags        = pdbLabel + "tags"
+	pdbLabelFile        = pdbLabel + "file"
+	pdbLabelEnvironment = pdbLabel + "environment"
+	pdbLabelParameter   = pdbLabel + "parameter_"
+	separator           = ","
+)
+
+var (
+	// DefaultSDConfig is the default PuppetDB SD configuration.
+	DefaultSDConfig = SDConfig{
+		RefreshInterval:  model.Duration(60 * time.Second),
+		Port:             80,
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+	}
+	matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
+	userAgent        = fmt.Sprintf("Prometheus/%s", version.Version)
+)
+
+func init() {
+	discovery.RegisterConfig(&SDConfig{})
+}
+
+// SDConfig is the configuration for PuppetDB based discovery.
+type SDConfig struct {
+	HTTPClientConfig  config.HTTPClientConfig `yaml:",inline"`
+	RefreshInterval   model.Duration          `yaml:"refresh_interval,omitempty"`
+	URL               string                  `yaml:"url"`
+	Query             string                  `yaml:"query"`
+	IncludeParameters bool                    `yaml:"include_parameters"`
+	Port              int                     `yaml:"port"`
+}
+
+// Name returns the name of the Config.
+func (*SDConfig) Name() string { return "puppetdb" }
+
+// NewDiscoverer returns a Discoverer for the Config.
+func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
+	return NewDiscovery(c, opts.Logger)
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *SDConfig) SetDirectory(dir string) {
+	c.HTTPClientConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultSDConfig
+	type plain SDConfig
+	err := unmarshal((*plain)(c))
+	if err != nil {
+		return err
+	}
+	if c.URL == "" {
+		return fmt.Errorf("URL is missing")
+	}
+	parsedURL, err := url.Parse(c.URL)
+	if err != nil {
+		return err
+	}
+	if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
+		return fmt.Errorf("URL scheme must be 'http' or 'https'")
+	}
+	if parsedURL.Host == "" {
+		return fmt.Errorf("host is missing in URL")
+	}
+	if c.Query == "" {
+		return fmt.Errorf("query missing")
+	}
+	return nil
+}
+
+// Discovery provides service discovery functionality based
+// on PuppetDB resources.
+type Discovery struct {
+	*refresh.Discovery
+	url               string
+	query             string
+	port              int
+	includeParameters bool
+	client            *http.Client
+}
+
+// NewDiscovery returns a new PuppetDB discovery for the given config.
+func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
+	if err != nil {
+		return nil, err
+	}
+	client.Timeout = time.Duration(conf.RefreshInterval)
+
+	u, err := url.Parse(conf.URL)
+	if err != nil {
+		return nil, err
+	}
+	u.Path = path.Join(u.Path, "pdb/query/v4")
+
+	d := &Discovery{
+		url:               u.String(),
+		port:              conf.Port,
+		query:             conf.Query,
+		includeParameters: conf.IncludeParameters,
+		client:            client,
+	}
+
+	d.Discovery = refresh.NewDiscovery(
+		logger,
+		"http",
+		time.Duration(conf.RefreshInterval),
+		d.refresh,
+	)
+	return d, nil
+}
+
+func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+	body := struct {
+		Query string `json:"query"`
+	}{d.query}
+	bodyBytes, err := json.Marshal(body)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("POST", d.url, bytes.NewBuffer(bodyBytes))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", userAgent)
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := d.client.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		io.Copy(ioutil.Discard, resp.Body)
+		resp.Body.Close()
+	}()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, errors.Errorf("server returned HTTP status %s", resp.Status)
+	}
+
+	if ct := resp.Header.Get("Content-Type"); !matchContentType.MatchString(ct) {
+		return nil, errors.Errorf("unsupported content type %s", resp.Header.Get("Content-Type"))
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var resources []Resource
+
+	if err := json.Unmarshal(b, &resources); err != nil {
+		return nil, err
+	}
+
+	tg := &targetgroup.Group{
+		// Use a pseudo-URL as source.
+		Source: d.url + "?query=" + d.query,
+	}
+
+	for _, resource := range resources {
+		labels := model.LabelSet{
+			pdbLabelCertname:    model.LabelValue(resource.Certname),
+			pdbLabelResource:    model.LabelValue(resource.Resource),
+			pdbLabelType:        model.LabelValue(resource.Type),
+			pdbLabelTitle:       model.LabelValue(resource.Title),
+			pdbLabelExported:    model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
+			pdbLabelFile:        model.LabelValue(resource.File),
+			pdbLabelEnvironment: model.LabelValue(resource.Environment),
+		}
+
+		addr := net.JoinHostPort(resource.Certname, strconv.FormatUint(uint64(d.port), 10))
+		labels[model.AddressLabel] = model.LabelValue(addr)
+
+		if len(resource.Tags) > 0 {
+			// We surround the separated list with the separator as well. This way regular expressions
+			// in relabeling rules don't have to consider tag positions.
+			tags := separator + strings.Join(resource.Tags, separator) + separator
+			labels[pdbLabelTags] = model.LabelValue(tags)
+		}
+
+		// Parameters are not included by default. This should only be enabled
+		// on select resources as it might expose secrets on the Prometheus UI
+		// for certain resources.
+		if d.includeParameters {
+			for k, v := range resource.Parameters.toLabels() {
+				labels[pdbLabelParameter+k] = v
+			}
+		}
+
+		tg.Targets = append(tg.Targets, labels)
+	}
+
+	return []*targetgroup.Group{tg}, nil
+}
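(For reference, the request that `refresh` builds above can be reproduced by hand. A sketch with curl — the server address is illustrative, while the `/pdb/query/v4` suffix, the JSON headers, and the body shape come straight from the code; the query is the one used in the testdata:

    curl -X POST https://puppetdb.example.com/pdb/query/v4 \
      -H 'Content-Type: application/json' \
      -H 'Accept: application/json' \
      -d '{"query": "resources { type = \"Package\" and title = \"httpd\" }"}'
)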
@@ -0,0 +1,195 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package puppetdb
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/stretchr/testify/require"
+)
+
+func mockServer(t *testing.T) *httptest.Server {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var request struct {
+			Query string `json:"query"`
+		}
+		err := json.NewDecoder(r.Body).Decode(&request)
+		if err != nil {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+		http.ServeFile(w, r, "fixtures/"+request.Query+".json")
+	}))
+	t.Cleanup(ts.Close)
+	return ts
+}
+
+func TestPuppetSlashInURL(t *testing.T) {
+	tests := map[string]string{
+		"https://puppetserver":      "https://puppetserver/pdb/query/v4",
+		"https://puppetserver/":     "https://puppetserver/pdb/query/v4",
+		"http://puppetserver:8080/": "http://puppetserver:8080/pdb/query/v4",
+		"http://puppetserver:8080":  "http://puppetserver:8080/pdb/query/v4",
+	}
+
+	for serverURL, apiURL := range tests {
+		cfg := SDConfig{
+			HTTPClientConfig: config.DefaultHTTPClientConfig,
+			URL:              serverURL,
+			Query:            "vhosts", // This is not a valid PuppetDB query, but it is used by the mock.
+			Port:             80,
+			RefreshInterval:  model.Duration(30 * time.Second),
+		}
+		d, err := NewDiscovery(&cfg, log.NewNopLogger())
+		require.NoError(t, err)
+		require.Equal(t, apiURL, d.url)
+	}
+}
+
+func TestPuppetDBRefresh(t *testing.T) {
+	ts := mockServer(t)
+
+	cfg := SDConfig{
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		URL:              ts.URL,
+		Query:            "vhosts", // This is not a valid PuppetDB query, but it is used by the mock.
+		Port:             80,
+		RefreshInterval:  model.Duration(30 * time.Second),
+	}
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	tgs, err := d.refresh(ctx)
+	require.NoError(t, err)
+
+	expectedTargets := []*targetgroup.Group{
+		{
+			Targets: []model.LabelSet{
+				{
+					model.AddressLabel:                             model.LabelValue("edinburgh.example.com:80"),
+					model.LabelName("__meta_puppetdb_certname"):    model.LabelValue("edinburgh.example.com"),
+					model.LabelName("__meta_puppetdb_environment"): model.LabelValue("prod"),
+					model.LabelName("__meta_puppetdb_exported"):    model.LabelValue("false"),
+					model.LabelName("__meta_puppetdb_file"):        model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"),
+					model.LabelName("__meta_puppetdb_resource"):    model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"),
+					model.LabelName("__meta_puppetdb_tags"):        model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"),
+					model.LabelName("__meta_puppetdb_title"):       model.LabelValue("default-ssl"),
+					model.LabelName("__meta_puppetdb_type"):        model.LabelValue("Apache::Vhost"),
+				},
+			},
+			Source: ts.URL + "/pdb/query/v4?query=vhosts",
+		},
+	}
+	require.Equal(t, tgs, expectedTargets)
+}
+
+func TestPuppetDBRefreshWithParameters(t *testing.T) {
+	ts := mockServer(t)
+
+	cfg := SDConfig{
+		HTTPClientConfig:  config.DefaultHTTPClientConfig,
+		URL:               ts.URL,
+		Query:             "vhosts", // This is not a valid PuppetDB query, but it is used by the mock.
+		Port:              80,
+		IncludeParameters: true,
+		RefreshInterval:   model.Duration(30 * time.Second),
+	}
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	tgs, err := d.refresh(ctx)
+	require.NoError(t, err)
+
+	expectedTargets := []*targetgroup.Group{
+		{
+			Targets: []model.LabelSet{
+				{
+					model.AddressLabel: model.LabelValue("edinburgh.example.com:80"),
+					model.LabelName("__meta_puppetdb_certname"):                  model.LabelValue("edinburgh.example.com"),
+					model.LabelName("__meta_puppetdb_environment"):               model.LabelValue("prod"),
+					model.LabelName("__meta_puppetdb_exported"):                  model.LabelValue("false"),
+					model.LabelName("__meta_puppetdb_file"):                      model.LabelValue("/etc/puppetlabs/code/environments/prod/modules/upstream/apache/manifests/init.pp"),
+					model.LabelName("__meta_puppetdb_parameter_access_log"):      model.LabelValue("true"),
+					model.LabelName("__meta_puppetdb_parameter_access_log_file"): model.LabelValue("ssl_access_log"),
+					model.LabelName("__meta_puppetdb_parameter_docroot"):         model.LabelValue("/var/www/html"),
+					model.LabelName("__meta_puppetdb_parameter_ensure"):          model.LabelValue("absent"),
+					model.LabelName("__meta_puppetdb_parameter_labels_alias"):    model.LabelValue("edinburgh"),
+					model.LabelName("__meta_puppetdb_parameter_options"):         model.LabelValue("Indexes,FollowSymLinks,MultiViews"),
+					model.LabelName("__meta_puppetdb_resource"):                  model.LabelValue("49af83866dc5a1518968b68e58a25319107afe11"),
+					model.LabelName("__meta_puppetdb_tags"):                      model.LabelValue(",roles::hypervisor,apache,apache::vhost,class,default-ssl,profile_hypervisor,vhost,profile_apache,hypervisor,__node_regexp__edinburgh,roles,node,"),
+					model.LabelName("__meta_puppetdb_title"):                     model.LabelValue("default-ssl"),
+					model.LabelName("__meta_puppetdb_type"):                      model.LabelValue("Apache::Vhost"),
+				},
+			},
+			Source: ts.URL + "/pdb/query/v4?query=vhosts",
+		},
+	}
+	require.Equal(t, tgs, expectedTargets)
+}
+
+func TestPuppetDBInvalidCode(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusBadRequest)
+	}))
+
+	t.Cleanup(ts.Close)
+
+	cfg := SDConfig{
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		URL:              ts.URL,
+		RefreshInterval:  model.Duration(30 * time.Second),
+	}
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	_, err = d.refresh(ctx)
+	require.EqualError(t, err, "server returned HTTP status 400 Bad Request")
+}
+
+func TestPuppetDBInvalidFormat(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, "{}")
+	}))
+
+	t.Cleanup(ts.Close)
+
+	cfg := SDConfig{
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		URL:              ts.URL,
+		RefreshInterval:  model.Duration(30 * time.Second),
+	}
+
+	d, err := NewDiscovery(&cfg, log.NewNopLogger())
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	_, err = d.refresh(ctx)
+	require.EqualError(t, err, "unsupported content type text/plain; charset=utf-8")
+}
@@ -0,0 +1,82 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package puppetdb
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/util/strutil"
+)
+
+type Resource struct {
+	Certname    string     `json:"certname"`
+	Resource    string     `json:"resource"`
+	Type        string     `json:"type"`
+	Title       string     `json:"title"`
+	Exported    bool       `json:"exported"`
+	Tags        []string   `json:"tags"`
+	File        string     `json:"file"`
+	Environment string     `json:"environment"`
+	Parameters  Parameters `json:"parameters"`
+}
+
+type Parameters map[string]interface{}
+
+func (p *Parameters) toLabels() model.LabelSet {
+	labels := model.LabelSet{}
+
+	for k, v := range *p {
+		var labelValue string
+		switch value := v.(type) {
+		case string:
+			labelValue = value
+		case bool:
+			labelValue = strconv.FormatBool(value)
+		case []string:
+			labelValue = separator + strings.Join(value, separator) + separator
+		case []interface{}:
+			if len(value) == 0 {
+				continue
+			}
+			values := make([]string, len(value))
+			for i, v := range value {
+				switch value := v.(type) {
+				case string:
+					values[i] = value
+				case bool:
+					values[i] = strconv.FormatBool(value)
+				case []string:
+					values[i] = separator + strings.Join(value, separator) + separator
+				}
+			}
+			labelValue = strings.Join(values, separator)
+		case map[string]interface{}:
+			subParameter := Parameters(value)
+			prefix := strutil.SanitizeLabelName(k + "_")
+			for subk, subv := range subParameter.toLabels() {
+				labels[model.LabelName(prefix)+subk] = subv
+			}
+		default:
+			continue
+		}
+		if labelValue == "" {
+			continue
+		}
+		name := strutil.SanitizeLabelName(k)
+		labels[model.LabelName(name)] = model.LabelValue(labelValue)
+	}
+	return labels
+}
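(To make the flattening in `toLabels` concrete, this is how the `parameters` object from the `vhosts` fixture above maps to labels — matching the expectations in the discovery test; the `__meta_puppetdb_parameter_` prefix is added by the caller in `puppetdb.go`:

    "access_log": true                   ->  access_log="true"
    "access_log_file": "ssl_access_log"  ->  access_log_file="ssl_access_log"
    "options": ["Indexes", ...]          ->  options="Indexes,FollowSymLinks,MultiViews"
    "labels": {"alias": "edinburgh"}     ->  labels_alias="edinburgh"
    "directoryindex": ""                 ->  dropped (empty string)
    "additional_includes": [ ]           ->  dropped (empty array)
    "scriptaliases": [ { ... } ]         ->  dropped (unsupported element type)
)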
@@ -70,7 +70,7 @@ func newBaremetalDiscovery(conf *SDConfig) (*baremetalDiscovery, error) {
		tagsFilter: conf.TagsFilter,
	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
	if err != nil {
		return nil, err
	}
@@ -81,7 +81,7 @@ func newInstanceDiscovery(conf *SDConfig) (*instanceDiscovery, error) {
		tagsFilter: conf.TagsFilter,
	}

-	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd", config.WithHTTP2Disabled())
+	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "scaleway_sd")
	if err != nil {
		return nil, err
	}
@@ -112,7 +112,7 @@ func NewHTTPResourceClient(conf *HTTPResourceClientConfig, protocolVersion Proto
		endpointURL.RawQuery = conf.ExtraQueryParams.Encode()
	}

-	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithHTTP2Disabled(), config.WithIdleConnTimeout(conf.Timeout))
+	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, conf.Name, config.WithIdleConnTimeout(conf.Timeout))
	if err != nil {
		return nil, err
	}
@@ -272,6 +272,10 @@ nerve_sd_configs:
 openstack_sd_configs:
   [ - <openstack_sd_config> ... ]

+# List of PuppetDB service discovery configurations.
+puppetdb_sd_configs:
+  [ - <puppetdb_sd_config> ... ]
+
 # List of Scaleway service discovery configurations.
 scaleway_sd_configs:
   [ - <scaleway_sd_config> ... ]
@@ -1069,6 +1073,94 @@ tls_config:
   [ <tls_config> ]
 ```

+### `<puppetdb_sd_config>`
+
+PuppetDB SD configurations allow retrieving scrape targets from
+[PuppetDB](https://puppet.com/docs/puppetdb/latest/index.html) resources.
+
+This SD discovers resources and will create a target for each resource returned
+by the API.
+
+The resource address is the `certname` of the resource and can be changed during
+[relabeling](#relabel_config).
+
+The following meta labels are available on targets during [relabeling](#relabel_config):
+
+* `__meta_puppetdb_certname`: the name of the node associated with the resource
+* `__meta_puppetdb_resource`: a SHA-1 hash of the resource’s type, title, and parameters, for identification
+* `__meta_puppetdb_type`: the resource type
+* `__meta_puppetdb_title`: the resource title
+* `__meta_puppetdb_exported`: whether the resource is exported (`"true"` or `"false"`)
+* `__meta_puppetdb_tags`: comma separated list of resource tags
+* `__meta_puppetdb_file`: the manifest file in which the resource was declared
+* `__meta_puppetdb_environment`: the environment of the node associated with the resource
+* `__meta_puppetdb_parameter_<parametername>`: the parameters of the resource
+
+See below for the configuration options for PuppetDB discovery:
+
+```yaml
+# The URL of the PuppetDB root query endpoint.
+url: <string>
+
+# Puppet Query Language (PQL) query. Only resources are supported.
+# https://puppet.com/docs/puppetdb/latest/api/query/v4/pql.html
+query: <string>
+
+# Whether to include the parameters as meta labels.
+# Due to the differences between parameter types and Prometheus labels,
+# some parameters might not be rendered. The format of the parameters might
+# also change in future releases.
+#
+# Note: Enabling this exposes parameters in the Prometheus UI and API. Make sure
+# that you don't have secrets exposed as parameters if you enable this.
+[ include_parameters: <boolean> | default = false ]
+
+# Refresh interval to re-read the resources list.
+[ refresh_interval: <duration> | default = 60s ]
+
+# The port to scrape metrics from.
+[ port: <int> | default = 80 ]
+
+# TLS configuration to connect to the PuppetDB.
+tls_config:
+  [ <tls_config> ]
+
+# basic_auth, authorization, and oauth2, are mutually exclusive.
+
+# Optional HTTP basic authentication information.
+basic_auth:
+  [ username: <string> ]
+  [ password: <secret> ]
+  [ password_file: <string> ]
+
+# `Authorization` HTTP header configuration.
+authorization:
+  # Sets the authentication type.
+  [ type: <string> | default: Bearer ]
+  # Sets the credentials. It is mutually exclusive with
+  # `credentials_file`.
+  [ credentials: <secret> ]
+  # Sets the credentials with the credentials read from the configured file.
+  # It is mutually exclusive with `credentials`.
+  [ credentials_file: <filename> ]
+
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
+# Optional proxy URL.
+[ proxy_url: <string> ]
+
+# Configure whether HTTP requests follow HTTP 3xx redirects.
+[ follow_redirects: <bool> | default = true ]
+```
+
+See [this example Prometheus configuration file](/documentation/examples/prometheus-puppetdb.yml)
+for a detailed example of configuring Prometheus with PuppetDB.
+
+
 ### `<file_sd_config>`

 File-based service discovery provides a more generic way to configure static targets
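(For orientation, a minimal scrape configuration using this SD might look like the following — server name and query are illustrative; the example file linked in the docs above, and reproduced later in this commit, shows a fuller setup with relabeling:

    scrape_configs:
      - job_name: puppetdb-node-exporter
        puppetdb_sd_configs:
          - url: https://puppetdb.example.com
            query: 'resources { type = "Class" and title = "Prometheus::Node_exporter" }'
            port: 9100
)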
@@ -2387,6 +2479,10 @@ nerve_sd_configs:
 openstack_sd_configs:
   [ - <openstack_sd_config> ... ]

+# List of PuppetDB service discovery configurations.
+puppetdb_sd_configs:
+  [ - <puppetdb_sd_config> ... ]
+
 # List of Scaleway service discovery configurations.
 scaleway_sd_configs:
   [ - <scaleway_sd_config> ... ]
@@ -78,6 +78,10 @@ name: <string>
 # How often rules in the group are evaluated.
 [ interval: <duration> | default = global.evaluation_interval ]

+# Limit the number of alerts and series individual rules can produce.
+# 0 is no limit.
+[ limit: <int> | default = 0 ]
+
 rules:
   [ - <rule> ... ]
 ```
@@ -25,7 +25,7 @@ Here is a table comparing our two generic Service Discovery implementations.

 ## Requirements of HTTP SD endpoints

-If you implement an HTTP SD endpoint, here is a few requirements you should be
+If you implement an HTTP SD endpoint, here are a few requirements you should be
 aware of.

 The response is consumed as is, unmodified. On each refresh interval (default: 1
@@ -47,7 +47,7 @@ for incremental updates. A Prometheus instance does not send its hostname and it
 is not possible for a SD endpoint to know if the SD requests is the first one
 after a restart or not.

-The URL to the HTTP SD is not considered secret. The authentication, and any API
+The URL to the HTTP SD is not considered secret. The authentication and any API
 keys should be passed with the appropriate authentication mechanisms. Prometheus
 supports TLS authentication, basic authentication, OAuth2, and authorization
 headers.
@@ -434,3 +434,26 @@ over time and return an instant vector with per-series aggregation results:

 Note that all values in the specified interval have the same weight in the
 aggregation even if the values are not equally spaced throughout the interval.
+
+## Trigonometric Functions
+
+The trigonometric functions work in radians:
+
+- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
+- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
+- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
+- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
+- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
+- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
+- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
+- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
+- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
+- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
+- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
+- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
+
+The following are useful for converting between degrees and radians:
+
+- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
+- `pi()`: returns pi.
+- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
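(A quick sanity check of the conversion helpers documented in this hunk, using only functions it defines: `rad(90)` is π/2 and `pi() / 2` is the same value, so both of these expressions evaluate to 1:

    sin(rad(90))
    sin(pi() / 2)
)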
@@ -40,6 +40,16 @@ grouping labels becoming the output label set. The metric name is dropped. Entries
 for which no matching entry in the right-hand vector can be found are not part of
 the result.

+### Trigonometric binary operators
+
+The following trigonometric binary operators, which work in radians, exist in Prometheus:
+
+* `atan2` (based on https://pkg.go.dev/math#Atan2)
+
+Trigonometric operators allow trigonometric functions to be executed on two vectors using
+vector matching, which isn't available with normal functions. They act in the same manner
+as arithmetic operators.
+
 ### Comparison binary operators

 The following binary comparison operators exist in Prometheus:
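(As a sketch of what the new operator enables — the metric names are illustrative: with one series per instance holding the y and x components, `atan2` evaluates Go's `math.Atan2(y, x)` for every pair of samples whose labels match, exactly as `+` or `/` would pair them:

    # Per-instance angle in radians, via one-to-one vector matching.
    wind_velocity_y atan2 wind_velocity_x
)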
@@ -264,7 +274,7 @@ The following list shows the precedence of binary operators in Prometheus, from
 highest to lowest.

 1. `^`
-2. `*`, `/`, `%`
+2. `*`, `/`, `%`, `atan2`
 3. `+`, `-`
 4. `==`, `!=`, `<=`, `<`, `>=`, `>`
 5. `and`, `unless`
@@ -27,7 +27,7 @@ replayed when the Prometheus server restarts. Write-ahead log files are stored
 in the `wal` directory in 128MB segments. These files contain raw data that
 has not yet been compacted; thus they are significantly larger than regular block
 files. Prometheus will retain a minimum of three write-ahead log files.
-High-traffic servers may retain more than three WAL files in order to to keep at
+High-traffic servers may retain more than three WAL files in order to keep at
 least two hours of raw data.

 A Prometheus server's data directory looks something like this:
@@ -17,7 +17,6 @@ import (
	"context"
	"io/ioutil"
	"os"
-	"reflect"
	"testing"

	"github.com/prometheus/common/model"
@@ -217,14 +216,7 @@ func TestGenerateTargetGroups(t *testing.T) {

	for _, testCase := range testCases {
		result := generateTargetGroups(testCase.targetGroup)

-		if !reflect.DeepEqual(result, testCase.expectedCustomSD) {
-			t.Errorf("%q failed\ngot: %#v\nexpected: %v",
-				testCase.title,
-				result,
-				testCase.expectedCustomSD)
-		}
-
+		require.Equal(t, testCase.expectedCustomSD, result)
	}
 }

@ -0,0 +1,40 @@
|
|||
# Prometheus example configuration to be used with PuppetDB.
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'puppetdb-node-exporter'
|
||||
puppetdb_sd_configs:
|
||||
# This example discovers the nodes which have the class Prometheus::Node_exporter.
|
||||
- url: https://puppetdb.example.com
|
||||
query: 'resources { type = "Class" and title = "Prometheus::Node_exporter" }'
|
||||
port: 9100
|
||||
tls_config:
|
||||
cert_file: prometheus-public.pem
|
||||
key_file: prometheus-private.pem
|
||||
ca_file: ca.pem
|
||||
|
||||
- job_name: 'puppetdb-scrape-jobs'
|
||||
puppetdb_sd_configs:
|
||||
# This example uses the Prometheus::Scrape_job
|
||||
# exported resources.
|
||||
# https://github.com/camptocamp/prometheus-puppetdb-sd
|
||||
# This examples is compatible with Prometheus-puppetdb-sd,
|
||||
# if the exported Prometheus::Scrape_job only have at most one target.
|
||||
- url: https://puppetdb.example.com
|
||||
query: 'resources { type = "Prometheus::Scrape_job" and exported = true }'
|
||||
include_parameters: true
|
||||
tls_config:
|
||||
cert_file: prometheus-public.pem
|
||||
key_file: prometheus-private.pem
|
||||
ca_file: ca.pem
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_puppetdb_certname]
|
||||
target_label: certname
|
||||
- source_labels: [__meta_puppetdb_parameter_targets]
|
||||
regex: '(.+),?.*'
|
||||
replacement: $1
|
||||
target_label: __address__
|
||||
- source_labels: [__meta_puppetdb_parameter_job_name]
|
||||
target_label: job
|
||||
- regex: '__meta_puppetdb_parameter_labels_(.+)'
|
||||
replacement: '$1'
|
||||
action: labelmap
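As a sketch of what the second job's relabeling achieves, a discovered resource carrying the following metadata (values hypothetical) would be scraped at host1.example.com:9100 with job="node" and an extra team="infra" label:

__meta_puppetdb_parameter_targets="host1.example.com:9100"  ->  __address__="host1.example.com:9100"
__meta_puppetdb_parameter_job_name="node"                   ->  job="node"
__meta_puppetdb_parameter_labels_team="infra"               ->  team="infra"

The greedy `(.+)` in the `targets` rule matches the whole parameter value, so a comma-separated list would not be split into multiple addresses; this is why the comments above restrict the example to scrape jobs with at most one target.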
|
|
@ -17,6 +17,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -32,17 +33,13 @@ func TestEscape(t *testing.T) {
|
|||
value := "abzABZ019(){},'\"\\"
|
||||
expected := "abzABZ019\\(\\)\\{\\}\\,\\'\\\"\\\\"
|
||||
actual := escape(model.LabelValue(value))
|
||||
if expected != actual {
|
||||
t.Errorf("Expected %s, got %s", expected, actual)
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
|
||||
// Test percent-encoding.
|
||||
value = "é/|_;:%."
|
||||
expected = "%C3%A9%2F|_;:%25%2E"
|
||||
actual = escape(model.LabelValue(value))
|
||||
if expected != actual {
|
||||
t.Errorf("Expected %s, got %s", expected, actual)
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestPathFromMetric(t *testing.T) {
|
||||
|
@ -51,7 +48,5 @@ func TestPathFromMetric(t *testing.T) {
|
|||
".many_chars.abc!ABC:012-3!45%C3%B667~89%2E%2F\\(\\)\\{\\}\\,%3D%2E\\\"\\\\" +
|
||||
".testlabel.test:value")
|
||||
actual := pathFromMetric(metric, "prefix.")
|
||||
if expected != actual {
|
||||
t.Errorf("Expected %s, got %s", expected, actual)
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
|
||||
influx "github.com/influxdata/influxdb/client/v2"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
|
@ -73,28 +74,17 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
|
|||
|
||||
server := httptest.NewServer(http.HandlerFunc(
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != "POST" {
|
||||
t.Fatalf("Unexpected method; expected POST, got %s", r.Method)
|
||||
}
|
||||
if r.URL.Path != "/write" {
|
||||
t.Fatalf("Unexpected path; expected %s, got %s", "/write", r.URL.Path)
|
||||
}
|
||||
require.Equal(t, "POST", r.Method, "Unexpected method.")
|
||||
require.Equal(t, "/write", r.URL.Path, "Unexpected path.")
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading body: %s", err)
|
||||
}
|
||||
|
||||
if string(b) != expectedBody {
|
||||
t.Fatalf("Unexpected request body; expected:\n\n%s\n\ngot:\n\n%s", expectedBody, string(b))
|
||||
}
|
||||
require.NoError(t, err, "Error reading body.")
|
||||
require.Equal(t, expectedBody, string(b), "Unexpected request body.")
|
||||
},
|
||||
))
|
||||
defer server.Close()
|
||||
|
||||
serverURL, err := url.Parse(server.URL)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to parse server URL %s: %s", server.URL, err)
|
||||
}
|
||||
require.NoError(t, err, "Unable to parse server URL.")
|
||||
|
||||
conf := influx.HTTPConfig{
|
||||
Addr: serverURL.String(),
|
||||
|
@ -103,8 +93,6 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
|
|||
Timeout: time.Minute,
|
||||
}
|
||||
c := NewClient(nil, conf, "test_db", "default")
|
||||
|
||||
if err := c.Write(samples); err != nil {
|
||||
t.Fatalf("Error sending samples: %s", err)
|
||||
}
|
||||
err = c.Write(samples)
|
||||
require.NoError(t, err, "Error sending samples.")
|
||||
}
|
||||
|
|
|
@ -14,12 +14,11 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -36,9 +35,7 @@ func TestTagsFromMetric(t *testing.T) {
|
|||
"many_chars": TagValue("abc!ABC:012-3!45ö67~89./"),
|
||||
}
|
||||
actual := tagsFromMetric(metric)
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Errorf("Expected %#v, got %#v", expected, actual)
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestMarshalStoreSamplesRequest(t *testing.T) {
|
||||
|
@ -51,25 +48,11 @@ func TestMarshalStoreSamplesRequest(t *testing.T) {
|
|||
expectedJSON := []byte(`{"metric":"test_.metric","timestamp":4711,"value":3.1415,"tags":{"many_chars":"abc_21ABC_.012-3_2145_C3_B667_7E89./","testlabel":"test_.value"}}`)
|
||||
|
||||
resultingJSON, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
t.Fatalf("Marshal(request) resulted in err: %s", err)
|
||||
}
|
||||
if !bytes.Equal(resultingJSON, expectedJSON) {
|
||||
t.Errorf(
|
||||
"Marshal(request) => %q, want %q",
|
||||
resultingJSON, expectedJSON,
|
||||
)
|
||||
}
|
||||
require.NoError(t, err, "Marshal(request) resulted in err.")
|
||||
require.Equal(t, expectedJSON, resultingJSON)
|
||||
|
||||
var unmarshaledRequest StoreSamplesRequest
|
||||
err = json.Unmarshal(expectedJSON, &unmarshaledRequest)
|
||||
if err != nil {
|
||||
t.Fatalf("Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(unmarshaledRequest, request) {
|
||||
t.Errorf(
|
||||
"Unmarshal(expectedJSON, &unmarshaledRequest) => %#v, want %#v",
|
||||
unmarshaledRequest, request,
|
||||
)
|
||||
}
|
||||
require.NoError(t, err, "Unmarshal(expectedJSON, &unmarshaledRequest) resulted in err.")
|
||||
require.Equal(t, request, unmarshaledRequest)
|
||||
}
|
||||
|
|
|
@ -14,9 +14,10 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var stringtests = []struct {
|
||||
|
@ -32,17 +33,9 @@ var stringtests = []struct {
|
|||
|
||||
func TestTagValueMarshaling(t *testing.T) {
|
||||
for i, tt := range stringtests {
|
||||
json, err := json.Marshal(tt.tv)
|
||||
if err != nil {
|
||||
t.Errorf("%d. Marshal(%q) returned err: %s", i, tt.tv, err)
|
||||
} else {
|
||||
if !bytes.Equal(json, tt.json) {
|
||||
t.Errorf(
|
||||
"%d. Marshal(%q) => %q, want %q",
|
||||
i, tt.tv, json, tt.json,
|
||||
)
|
||||
}
|
||||
}
|
||||
got, err := json.Marshal(tt.tv)
|
||||
require.NoError(t, err, "%d. Marshal(%q) returned error.", i, tt.tv)
|
||||
require.Equal(t, tt.json, got, "%d. Marshal(%q) not equal.", i, tt.tv)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -50,15 +43,7 @@ func TestTagValueUnMarshaling(t *testing.T) {
|
|||
for i, tt := range stringtests {
|
||||
var tv TagValue
|
||||
err := json.Unmarshal(tt.json, &tv)
|
||||
if err != nil {
|
||||
t.Errorf("%d. Unmarshal(%q, &str) returned err: %s", i, tt.json, err)
|
||||
} else {
|
||||
if tv != tt.tv {
|
||||
t.Errorf(
|
||||
"%d. Unmarshal(%q, &str) => str==%q, want %q",
|
||||
i, tt.json, tv, tt.tv,
|
||||
)
|
||||
}
|
||||
}
|
||||
require.NoError(t, err, "%d. Unmarshal(%q, &str) returned error.", i, tt.json)
|
||||
require.Equal(t, tt.tv, tv, "%d. Unmarshal(%q, &str) not equal.", i, tt.json)
|
||||
}
|
||||
}
|
||||
|
|
4
go.mod
|
@ -8,7 +8,7 @@ require (
|
|||
github.com/Azure/go-autorest/autorest/adal v0.9.15
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
|
||||
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922
|
||||
github.com/aws/aws-sdk-go v1.40.37
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/containerd/containerd v1.5.4 // indirect
|
||||
|
@ -46,7 +46,7 @@ require (
|
|||
github.com/prometheus/alertmanager v0.23.0
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/client_model v0.2.0
|
||||
github.com/prometheus/common v0.30.1
|
||||
github.com/prometheus/common v0.31.1
|
||||
github.com/prometheus/common/sigv4 v0.1.0
|
||||
github.com/prometheus/exporter-toolkit v0.6.1
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
|
||||
|
|
7
go.sum
|
@ -151,8 +151,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
|||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4=
|
||||
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 h1:8ypNbf5sd3Sm3cKJ9waOGoQv6dKAFiFty9L6NP1AqJ4=
|
||||
github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
|
||||
|
@ -1140,8 +1141,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8
|
|||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.30.1 h1:MKLrb1ClCc+Yjs6xdw/FW0AMRLNiNgi2ByUZxgeG/wo=
|
||||
github.com/prometheus/common v0.30.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs=
|
||||
github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
|
||||
|
|
|
@ -634,7 +634,7 @@ type alertmanagerSet struct {
|
|||
}
|
||||
|
||||
func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
|
||||
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled())
|
||||
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@ func TestHandlerSendAll(t *testing.T) {
|
|||
Username: "prometheus",
|
||||
Password: "testing_password",
|
||||
},
|
||||
}, "auth_alertmanager", config_util.WithHTTP2Disabled())
|
||||
}, "auth_alertmanager")
|
||||
|
||||
h.alertmanagers = make(map[string]*alertmanagerSet)
|
||||
|
||||
|
@ -392,7 +392,7 @@ func TestHandlerQueuing(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
return
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("Alerts were not pushed")
|
||||
require.FailNow(t, "Alerts were not pushed.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -424,7 +424,7 @@ func TestHandlerQueuing(t *testing.T) {
|
|||
case err := <-errc:
|
||||
require.NoError(t, err)
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("Alerts were not pushed")
|
||||
require.FailNow(t, "Alerts were not pushed.")
|
||||
}
|
||||
|
||||
// Verify that we receive the last 3 batches.
|
||||
|
@ -480,14 +480,12 @@ alerting:
|
|||
alertmanagers:
|
||||
- static_configs:
|
||||
`
|
||||
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
|
||||
t.Fatalf("Unable to load YAML config: %s", err)
|
||||
}
|
||||
err := yaml.UnmarshalStrict([]byte(s), cfg)
|
||||
require.NoError(t, err, "Unable to load YAML config.")
|
||||
require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
|
||||
|
||||
if err := n.ApplyConfig(cfg); err != nil {
|
||||
t.Fatalf("Error Applying the config:%v", err)
|
||||
}
|
||||
err = n.ApplyConfig(cfg)
|
||||
require.NoError(t, err, "Error applying the config.")
|
||||
|
||||
tgs := make(map[string][]*targetgroup.Group)
|
||||
for _, tt := range tests {
|
||||
|
@ -534,14 +532,12 @@ alerting:
|
|||
regex: 'alertmanager:9093'
|
||||
action: drop
|
||||
`
|
||||
if err := yaml.UnmarshalStrict([]byte(s), cfg); err != nil {
|
||||
t.Fatalf("Unable to load YAML config: %s", err)
|
||||
}
|
||||
err := yaml.UnmarshalStrict([]byte(s), cfg)
|
||||
require.NoError(t, err, "Unable to load YAML config.")
|
||||
require.Equal(t, 1, len(cfg.AlertingConfig.AlertmanagerConfigs))
|
||||
|
||||
if err := n.ApplyConfig(cfg); err != nil {
|
||||
t.Fatalf("Error Applying the config:%v", err)
|
||||
}
|
||||
err = n.ApplyConfig(cfg)
|
||||
require.NoError(t, err, "Error applying the config.")
|
||||
|
||||
tgs := make(map[string][]*targetgroup.Group)
|
||||
for _, tt := range tests {
|
||||
|
|
|
@ -107,6 +107,7 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
|
|||
type RuleGroup struct {
|
||||
Name string `yaml:"name"`
|
||||
Interval model.Duration `yaml:"interval,omitempty"`
|
||||
Limit int `yaml:"limit,omitempty"`
|
||||
Rules []RuleNode `yaml:"rules"`
|
||||
}
|
||||
|
||||
|
@ -239,6 +240,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
|
|||
model.Time(timestamp.FromTime(time.Now())),
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
return tmpl.ParseTest()
|
||||
}
|
||||
|
|
|
@ -15,19 +15,14 @@ package rulefmt
|
|||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseFileSuccess(t *testing.T) {
|
||||
if _, errs := ParseFile("testdata/test.yaml"); len(errs) > 0 {
|
||||
t.Errorf("unexpected errors parsing file")
|
||||
for _, err := range errs {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
_, errs := ParseFile("testdata/test.yaml")
|
||||
require.Empty(t, errs, "unexpected errors parsing file")
|
||||
}
|
||||
|
||||
func TestParseFileFailure(t *testing.T) {
|
||||
|
@ -79,15 +74,9 @@ func TestParseFileFailure(t *testing.T) {
|
|||
|
||||
for _, c := range table {
|
||||
_, errs := ParseFile(filepath.Join("testdata", c.filename))
|
||||
if errs == nil {
|
||||
t.Errorf("Expected error parsing %s but got none", c.filename)
|
||||
continue
|
||||
}
|
||||
if !strings.Contains(errs[0].Error(), c.errMsg) {
|
||||
t.Errorf("Expected error for %s to contain %q but got: %s", c.filename, c.errMsg, errs)
|
||||
}
|
||||
require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename)
|
||||
require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestTemplateParsing(t *testing.T) {
|
||||
|
|
|
@ -71,7 +71,7 @@ func BenchmarkRangeQuery(b *testing.B) {
|
|||
a := storage.Appender(context.Background())
|
||||
ts := int64(s * 10000) // 10s interval.
|
||||
for i, metric := range metrics {
|
||||
ref, _ := a.Append(refs[i], metric, ts, float64(s))
|
||||
ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics)))
|
||||
refs[i] = ref
|
||||
}
|
||||
if err := a.Commit(); err != nil {
|
||||
|
@ -130,6 +130,9 @@ func BenchmarkRangeQuery(b *testing.B) {
|
|||
{
|
||||
expr: "a_X unless b_X{l=~'.*[0-4]$'}",
|
||||
},
|
||||
{
|
||||
expr: "a_X and b_X{l='notfound'}",
|
||||
},
|
||||
// Simple functions.
|
||||
{
|
||||
expr: "abs(a_X)",
|
||||
|
@ -159,6 +162,9 @@ func BenchmarkRangeQuery(b *testing.B) {
|
|||
{
|
||||
expr: "count_values('value', h_X)",
|
||||
},
|
||||
{
|
||||
expr: "topk(1, a_X)",
|
||||
},
|
||||
// Combinations.
|
||||
{
|
||||
expr: "rate(a_X[1m]) + rate(b_X[1m])",
|
||||
|
@ -172,6 +178,10 @@ func BenchmarkRangeQuery(b *testing.B) {
|
|||
{
|
||||
expr: "histogram_quantile(0.9, rate(h_X[5m]))",
|
||||
},
|
||||
// Many-to-one join.
|
||||
{
|
||||
expr: "a_X + on(l) group_right a_one",
|
||||
},
|
||||
}
|
||||
|
||||
// X in an expr will be replaced by different metric sizes.
|
||||
|
|
146
promql/engine.go
|
@ -913,6 +913,8 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws storage.Warnings
|
|||
type EvalSeriesHelper struct {
|
||||
// The grouping key used by aggregation.
|
||||
groupingKey uint64
|
||||
// Used to map left-hand to right-hand in binary operations.
|
||||
signature string
|
||||
}
|
||||
|
||||
// EvalNodeHelper stores extra information and caches for evaluating a single node across steps.
|
||||
|
@ -925,8 +927,6 @@ type EvalNodeHelper struct {
|
|||
// Caches.
|
||||
// DropMetricName and label_*.
|
||||
Dmn map[uint64]labels.Labels
|
||||
// signatureFunc.
|
||||
sigf map[string]string
|
||||
// funcHistogramQuantile.
|
||||
signatureToMetricWithBuckets map[string]*metricWithBuckets
|
||||
// label_replace.
|
||||
|
@ -957,23 +957,6 @@ func (enh *EvalNodeHelper) DropMetricName(l labels.Labels) labels.Labels {
|
|||
return ret
|
||||
}
|
||||
|
||||
func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.Labels) string {
|
||||
if enh.sigf == nil {
|
||||
enh.sigf = make(map[string]string, len(enh.Out))
|
||||
}
|
||||
f := signatureFunc(on, enh.lblBuf, names...)
|
||||
return func(l labels.Labels) string {
|
||||
enh.lblBuf = l.Bytes(enh.lblBuf)
|
||||
ret, ok := enh.sigf[string(enh.lblBuf)]
|
||||
if ok {
|
||||
return ret
|
||||
}
|
||||
ret = f(l)
|
||||
enh.sigf[string(enh.lblBuf)] = ret
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
// rangeEval evaluates the given expressions, and then for each step calls
|
||||
// the given funcCall with the values computed for each expression at that
|
||||
// step. The return value is the combination into time series of all the
|
||||
|
@ -1432,22 +1415,28 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) {
|
|||
return append(enh.Out, Sample{Point: Point{V: val}}), nil
|
||||
}, e.LHS, e.RHS)
|
||||
case lt == parser.ValueTypeVector && rt == parser.ValueTypeVector:
|
||||
// Function to compute the join signature for each series.
|
||||
buf := make([]byte, 0, 1024)
|
||||
sigf := signatureFunc(e.VectorMatching.On, buf, e.VectorMatching.MatchingLabels...)
|
||||
initSignatures := func(series labels.Labels, h *EvalSeriesHelper) {
|
||||
h.signature = sigf(series)
|
||||
}
|
||||
switch e.Op {
|
||||
case parser.LAND:
|
||||
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
case parser.LOR:
|
||||
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
case parser.LUNLESS:
|
||||
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh), nil
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
default:
|
||||
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh), nil
|
||||
return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
|
||||
return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh), nil
|
||||
}, e.LHS, e.RHS)
|
||||
}
|
||||
|
||||
|
@ -1774,62 +1763,72 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m
|
|||
return out
|
||||
}
|
||||
|
||||
func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector {
|
||||
func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
|
||||
if matching.Card != parser.CardManyToMany {
|
||||
panic("set operations must only use many-to-many matching")
|
||||
}
|
||||
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
|
||||
if len(lhs) == 0 || len(rhs) == 0 {
|
||||
return nil // Short-circuit: AND with nothing is nothing.
|
||||
}
|
||||
|
||||
// The set of signatures for the right-hand side Vector.
|
||||
rightSigs := map[string]struct{}{}
|
||||
// Add all rhs samples to a map so we can easily find matches later.
|
||||
for _, rs := range rhs {
|
||||
rightSigs[sigf(rs.Metric)] = struct{}{}
|
||||
for _, sh := range rhsh {
|
||||
rightSigs[sh.signature] = struct{}{}
|
||||
}
|
||||
|
||||
for _, ls := range lhs {
|
||||
for i, ls := range lhs {
|
||||
// If there's a matching entry in the right-hand side Vector, add the sample.
|
||||
if _, ok := rightSigs[sigf(ls.Metric)]; ok {
|
||||
if _, ok := rightSigs[lhsh[i].signature]; ok {
|
||||
enh.Out = append(enh.Out, ls)
|
||||
}
|
||||
}
|
||||
return enh.Out
|
||||
}
|
||||
|
||||
func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector {
|
||||
func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
|
||||
if matching.Card != parser.CardManyToMany {
|
||||
panic("set operations must only use many-to-many matching")
|
||||
}
|
||||
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
|
||||
if len(lhs) == 0 { // Short-circuit.
|
||||
return rhs
|
||||
} else if len(rhs) == 0 {
|
||||
return lhs
|
||||
}
|
||||
|
||||
leftSigs := map[string]struct{}{}
|
||||
// Add everything from the left-hand-side Vector.
|
||||
for _, ls := range lhs {
|
||||
leftSigs[sigf(ls.Metric)] = struct{}{}
|
||||
for i, ls := range lhs {
|
||||
leftSigs[lhsh[i].signature] = struct{}{}
|
||||
enh.Out = append(enh.Out, ls)
|
||||
}
|
||||
// Add all right-hand side elements which have not been added from the left-hand side.
|
||||
for _, rs := range rhs {
|
||||
if _, ok := leftSigs[sigf(rs.Metric)]; !ok {
|
||||
for j, rs := range rhs {
|
||||
if _, ok := leftSigs[rhsh[j].signature]; !ok {
|
||||
enh.Out = append(enh.Out, rs)
|
||||
}
|
||||
}
|
||||
return enh.Out
|
||||
}
|
||||
|
||||
func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, enh *EvalNodeHelper) Vector {
|
||||
func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatching, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
|
||||
if matching.Card != parser.CardManyToMany {
|
||||
panic("set operations must only use many-to-many matching")
|
||||
}
|
||||
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
|
||||
|
||||
rightSigs := map[string]struct{}{}
|
||||
for _, rs := range rhs {
|
||||
rightSigs[sigf(rs.Metric)] = struct{}{}
|
||||
// Short-circuit: empty rhs means we will return everything in lhs;
|
||||
// empty lhs means we will return empty - don't need to build a map.
|
||||
if len(lhs) == 0 || len(rhs) == 0 {
|
||||
return lhs
|
||||
}
|
||||
|
||||
for _, ls := range lhs {
|
||||
if _, ok := rightSigs[sigf(ls.Metric)]; !ok {
|
||||
rightSigs := map[string]struct{}{}
|
||||
for _, sh := range rhsh {
|
||||
rightSigs[sh.signature] = struct{}{}
|
||||
}
|
||||
|
||||
for i, ls := range lhs {
|
||||
if _, ok := rightSigs[lhsh[i].signature]; !ok {
|
||||
enh.Out = append(enh.Out, ls)
|
||||
}
|
||||
}
|
||||
|
@ -1837,17 +1836,20 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi
|
|||
}
|
||||
|
||||
// VectorBinop evaluates a binary operation between two Vectors, excluding set operators.
|
||||
func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, enh *EvalNodeHelper) Vector {
|
||||
func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
|
||||
if matching.Card == parser.CardManyToMany {
|
||||
panic("many-to-many only allowed for set operators")
|
||||
}
|
||||
sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...)
|
||||
if len(lhs) == 0 || len(rhs) == 0 {
|
||||
return nil // Short-circuit: nothing is going to match.
|
||||
}
|
||||
|
||||
// The control flow below handles one-to-one or many-to-one matching.
|
||||
// For one-to-many, swap sidedness and account for the swap when calculating
|
||||
// values.
|
||||
if matching.Card == parser.CardOneToMany {
|
||||
lhs, rhs = rhs, lhs
|
||||
lhsh, rhsh = rhsh, lhsh
|
||||
}
|
||||
|
||||
// All samples from the rhs hashed by the matching label/values.
|
||||
|
@ -1861,8 +1863,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
|
|||
rightSigs := enh.rightSigs
|
||||
|
||||
// Add all rhs samples to a map so we can easily find matches later.
|
||||
for _, rs := range rhs {
|
||||
sig := sigf(rs.Metric)
|
||||
for i, rs := range rhs {
|
||||
sig := rhsh[i].signature
|
||||
// The rhs is guaranteed to be the 'one' side. Having multiple samples
|
||||
// with the same signature means that the matching is many-to-many.
|
||||
if duplSample, found := rightSigs[sig]; found {
|
||||
|
@ -1892,8 +1894,8 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *
|
|||
|
||||
// For all lhs samples find a respective rhs sample and perform
|
||||
// the binary operation.
|
||||
for _, ls := range lhs {
|
||||
sig := sigf(ls.Metric)
|
||||
for i, ls := range lhs {
|
||||
sig := lhsh[i].signature
|
||||
|
||||
rs, found := rightSigs[sig] // Look for a match in the rhs Vector.
|
||||
if !found {
|
||||
|
@ -2114,6 +2116,8 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64) (float64, bool) {
|
|||
return lhs, lhs >= rhs
|
||||
case parser.LTE:
|
||||
return lhs, lhs <= rhs
|
||||
case parser.ATAN2:
|
||||
return math.Atan2(lhs, rhs), true
|
||||
}
|
||||
panic(errors.Errorf("operator %q not allowed for operations between Vectors", op))
|
||||
}
|
||||
|
@ -2210,22 +2214,24 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
resultSize := k
|
||||
if k > inputVecLen {
|
||||
resultSize = inputVecLen
|
||||
} else if k == 0 {
|
||||
resultSize = 1
|
||||
}
|
||||
switch op {
|
||||
case parser.STDVAR, parser.STDDEV:
|
||||
result[groupingKey].value = 0
|
||||
case parser.TOPK, parser.QUANTILE:
|
||||
result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize)
|
||||
heap.Push(&result[groupingKey].heap, &Sample{
|
||||
result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize)
|
||||
result[groupingKey].heap[0] = Sample{
|
||||
Point: Point{V: s.V},
|
||||
Metric: s.Metric,
|
||||
})
|
||||
}
|
||||
case parser.BOTTOMK:
|
||||
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize)
|
||||
heap.Push(&result[groupingKey].reverseHeap, &Sample{
|
||||
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize)
|
||||
result[groupingKey].reverseHeap[0] = Sample{
|
||||
Point: Point{V: s.V},
|
||||
Metric: s.Metric,
|
||||
})
|
||||
}
|
||||
case parser.GROUP:
|
||||
result[groupingKey].value = 1
|
||||
}
|
||||
|
@ -2283,6 +2289,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
case parser.TOPK:
|
||||
if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) {
|
||||
if int64(len(group.heap)) == k {
|
||||
if k == 1 { // For k==1 we can replace in-situ.
|
||||
group.heap[0] = Sample{
|
||||
Point: Point{V: s.V},
|
||||
Metric: s.Metric,
|
||||
}
|
||||
break
|
||||
}
|
||||
heap.Pop(&group.heap)
|
||||
}
|
||||
heap.Push(&group.heap, &Sample{
|
||||
|
@ -2294,6 +2307,13 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
case parser.BOTTOMK:
|
||||
if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) {
|
||||
if int64(len(group.reverseHeap)) == k {
|
||||
if k == 1 { // For k==1 we can replace in-situ.
|
||||
group.reverseHeap[0] = Sample{
|
||||
Point: Point{V: s.V},
|
||||
Metric: s.Metric,
|
||||
}
|
||||
break
|
||||
}
|
||||
heap.Pop(&group.reverseHeap)
|
||||
}
|
||||
heap.Push(&group.reverseHeap, &Sample{
|
||||
|
@ -2327,7 +2347,9 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
|
||||
case parser.TOPK:
|
||||
// The heap keeps the lowest value on top, so reverse it.
|
||||
sort.Sort(sort.Reverse(aggr.heap))
|
||||
if len(aggr.heap) > 1 {
|
||||
sort.Sort(sort.Reverse(aggr.heap))
|
||||
}
|
||||
for _, v := range aggr.heap {
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: v.Metric,
|
||||
|
@ -2338,7 +2360,9 @@ func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without
|
|||
|
||||
case parser.BOTTOMK:
|
||||
// The heap keeps the highest value on top, so reverse it.
|
||||
sort.Sort(sort.Reverse(aggr.reverseHeap))
|
||||
if len(aggr.reverseHeap) > 1 {
|
||||
sort.Sort(sort.Reverse(aggr.reverseHeap))
|
||||
}
|
||||
for _, v := range aggr.reverseHeap {
|
||||
enh.Out = append(enh.Out, Sample{
|
||||
Metric: v.Metric,
|
||||
|
|
|
@ -72,7 +72,7 @@ func TestQueryConcurrency(t *testing.T) {
|
|||
case <-processing:
|
||||
// Expected.
|
||||
case <-time.After(20 * time.Millisecond):
|
||||
t.Fatalf("Query within concurrency threshold not being executed")
|
||||
require.Fail(t, "Query within concurrency threshold not being executed")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,7 +81,7 @@ func TestQueryConcurrency(t *testing.T) {
|
|||
|
||||
select {
|
||||
case <-processing:
|
||||
t.Fatalf("Query above concurrency threshold being executed")
|
||||
require.Fail(t, "Query above concurrency threshold being executed")
|
||||
case <-time.After(20 * time.Millisecond):
|
||||
// Expected.
|
||||
}
|
||||
|
@ -93,7 +93,7 @@ func TestQueryConcurrency(t *testing.T) {
|
|||
case <-processing:
|
||||
// Expected.
|
||||
case <-time.After(20 * time.Millisecond):
|
||||
t.Fatalf("Query within concurrency threshold not being executed")
|
||||
require.Fail(t, "Query within concurrency threshold not being executed")
|
||||
}
|
||||
|
||||
// Terminate remaining queries.
|
||||
|
@ -604,7 +604,7 @@ func TestEngineShutdown(t *testing.T) {
|
|||
require.Equal(t, errQueryCanceled, res.Err)
|
||||
|
||||
query2 := engine.newTestQuery(func(context.Context) error {
|
||||
t.Fatalf("reached query execution unexpectedly")
|
||||
require.FailNow(t, "reached query execution unexpectedly")
|
||||
return nil
|
||||
})
|
||||
|
||||
|
@ -1121,9 +1121,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
|
|||
//nolint:govet
|
||||
a[123] = 1
|
||||
|
||||
if err.Error() != "unexpected error" {
|
||||
t.Fatalf("wrong error message: %q, expected %q", err, "unexpected error")
|
||||
}
|
||||
require.EqualError(t, err, "unexpected error")
|
||||
}
|
||||
|
||||
func TestRecoverEvaluatorError(t *testing.T) {
|
||||
|
@ -1133,9 +1131,7 @@ func TestRecoverEvaluatorError(t *testing.T) {
|
|||
e := errors.New("custom error")
|
||||
|
||||
defer func() {
|
||||
if err.Error() != e.Error() {
|
||||
t.Fatalf("wrong error message: %q, expected %q", err, e)
|
||||
}
|
||||
require.EqualError(t, err, e.Error())
|
||||
}()
|
||||
defer ev.recover(nil, &err)
|
||||
|
||||
|
@ -1154,12 +1150,8 @@ func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
|
|||
}
|
||||
|
||||
defer func() {
|
||||
if err.Error() != e.Error() {
|
||||
t.Fatalf("wrong error message: %q, expected %q", err, e)
|
||||
}
|
||||
if len(ws) != len(warnings) && ws[0] != warnings[0] {
|
||||
t.Fatalf("wrong warning message: %q, expected %q", ws[0], warnings[0])
|
||||
}
|
||||
require.EqualError(t, err, e.Error())
|
||||
require.Equal(t, warnings, ws, "wrong warning message")
|
||||
}()
|
||||
defer ev.recover(&ws, &err)
|
||||
|
||||
|
|
|
@ -570,6 +570,87 @@ func funcLog10(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
|
|||
return simpleFunc(vals, enh, math.Log10)
|
||||
}
|
||||
|
||||
// === sin(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcSin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Sin)
|
||||
}
|
||||
|
||||
// === cos(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcCos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Cos)
|
||||
}
|
||||
|
||||
// === tan(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcTan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Tan)
|
||||
}
|
||||
|
||||
// === asin(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcAsin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Asin)
|
||||
}
|
||||
|
||||
// === acos(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcAcos(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Acos)
|
||||
}
|
||||
|
||||
// === atan(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcAtan(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Atan)
|
||||
}
|
||||
|
||||
// === sinh(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcSinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Sinh)
|
||||
}
|
||||
|
||||
// === cosh(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcCosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Cosh)
|
||||
}
|
||||
|
||||
// === tanh(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcTanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Tanh)
|
||||
}
|
||||
|
||||
// === asinh(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcAsinh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Asinh)
|
||||
}
|
||||
|
||||
// === acosh(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcAcosh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Acosh)
|
||||
}
|
||||
|
||||
// === atanh(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcAtanh(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, math.Atanh)
|
||||
}
|
||||
|
||||
// === rad(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcRad(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, func(v float64) float64 {
|
||||
return v * math.Pi / 180
|
||||
})
|
||||
}
|
||||
|
||||
// === deg(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, func(v float64) float64 {
|
||||
return v * 180 / math.Pi
|
||||
})
|
||||
}
|
||||
|
||||
// === pi() Scalar ===
|
||||
func funcPi(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return Vector{Sample{Point: Point{
|
||||
V: math.Pi,
|
||||
}}}
|
||||
}
|
||||
|
||||
// === sgn(Vector parser.ValueTypeVector) Vector ===
|
||||
func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
|
||||
return simpleFunc(vals, enh, func(v float64) float64 {
|
||||
|
@ -935,16 +1016,25 @@ var FunctionCalls = map[string]FunctionCall{
|
|||
"abs": funcAbs,
|
||||
"absent": funcAbsent,
|
||||
"absent_over_time": funcAbsentOverTime,
|
||||
"acos": funcAcos,
|
||||
"acosh": funcAcosh,
|
||||
"asin": funcAsin,
|
||||
"asinh": funcAsinh,
|
||||
"atan": funcAtan,
|
||||
"atanh": funcAtanh,
|
||||
"avg_over_time": funcAvgOverTime,
|
||||
"ceil": funcCeil,
|
||||
"changes": funcChanges,
|
||||
"clamp": funcClamp,
|
||||
"clamp_max": funcClampMax,
|
||||
"clamp_min": funcClampMin,
|
||||
"cos": funcCos,
|
||||
"cosh": funcCosh,
|
||||
"count_over_time": funcCountOverTime,
|
||||
"days_in_month": funcDaysInMonth,
|
||||
"day_of_month": funcDayOfMonth,
|
||||
"day_of_week": funcDayOfWeek,
|
||||
"deg": funcDeg,
|
||||
"delta": funcDelta,
|
||||
"deriv": funcDeriv,
|
||||
"exp": funcExp,
|
||||
|
@ -965,20 +1055,26 @@ var FunctionCalls = map[string]FunctionCall{
|
|||
"min_over_time": funcMinOverTime,
|
||||
"minute": funcMinute,
|
||||
"month": funcMonth,
|
||||
"pi": funcPi,
|
||||
"predict_linear": funcPredictLinear,
|
||||
"present_over_time": funcPresentOverTime,
|
||||
"quantile_over_time": funcQuantileOverTime,
|
||||
"rad": funcRad,
|
||||
"rate": funcRate,
|
||||
"resets": funcResets,
|
||||
"round": funcRound,
|
||||
"scalar": funcScalar,
|
||||
"sgn": funcSgn,
|
||||
"sin": funcSin,
|
||||
"sinh": funcSinh,
|
||||
"sort": funcSort,
|
||||
"sort_desc": funcSortDesc,
|
||||
"sqrt": funcSqrt,
|
||||
"stddev_over_time": funcStddevOverTime,
|
||||
"stdvar_over_time": funcStdvarOverTime,
|
||||
"sum_over_time": funcSumOverTime,
|
||||
"tan": funcTan,
|
||||
"tanh": funcTanh,
|
||||
"time": funcTime,
|
||||
"timestamp": funcTimestamp,
|
||||
"vector": funcVector,
|
||||
|
|
|
@ -39,9 +39,34 @@ var Functions = map[string]*Function{
|
|||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"present_over_time": {
|
||||
Name: "present_over_time",
|
||||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
"acos": {
|
||||
Name: "acos",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"acosh": {
|
||||
Name: "acosh",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"asin": {
|
||||
Name: "asin",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"asinh": {
|
||||
Name: "asinh",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"atan": {
|
||||
Name: "atan",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"atanh": {
|
||||
Name: "atanh",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"avg_over_time": {
|
||||
|
@ -74,6 +99,16 @@ var Functions = map[string]*Function{
|
|||
ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"cos": {
|
||||
Name: "cos",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"cosh": {
|
||||
Name: "cosh",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"count_over_time": {
|
||||
Name: "count_over_time",
|
||||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
|
@ -97,6 +132,11 @@ var Functions = map[string]*Function{
|
|||
Variadic: 1,
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"deg": {
|
||||
Name: "deg",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"delta": {
|
||||
Name: "delta",
|
||||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
|
@ -201,16 +241,31 @@ var Functions = map[string]*Function{
|
|||
Variadic: 1,
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"pi": {
|
||||
Name: "pi",
|
||||
ArgTypes: []ValueType{},
|
||||
ReturnType: ValueTypeScalar,
|
||||
},
|
||||
"predict_linear": {
|
||||
Name: "predict_linear",
|
||||
ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"present_over_time": {
|
||||
Name: "present_over_time",
|
||||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"quantile_over_time": {
|
||||
Name: "quantile_over_time",
|
||||
ArgTypes: []ValueType{ValueTypeScalar, ValueTypeMatrix},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"rad": {
|
||||
Name: "rad",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"rate": {
|
||||
Name: "rate",
|
||||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
|
@ -237,6 +292,16 @@ var Functions = map[string]*Function{
|
|||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"sin": {
|
||||
Name: "sin",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"sinh": {
|
||||
Name: "sinh",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"sort": {
|
||||
Name: "sort",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
|
@ -267,6 +332,16 @@ var Functions = map[string]*Function{
|
|||
ArgTypes: []ValueType{ValueTypeMatrix},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"tan": {
|
||||
Name: "tan",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"tanh": {
|
||||
Name: "tanh",
|
||||
ArgTypes: []ValueType{ValueTypeVector},
|
||||
ReturnType: ValueTypeVector,
|
||||
},
|
||||
"time": {
|
||||
Name: "time",
|
||||
ArgTypes: []ValueType{},
|
||||
|
|
|
@ -84,6 +84,7 @@ NEQ_REGEX
|
|||
POW
|
||||
SUB
|
||||
AT
|
||||
ATAN2
|
||||
%token operatorsEnd
|
||||
|
||||
// Aggregators.
|
||||
|
@ -156,7 +157,7 @@ START_METRIC_SELECTOR
|
|||
%left LAND LUNLESS
|
||||
%left EQLC GTE GTR LSS LTE NEQ
|
||||
%left ADD SUB
|
||||
%left MUL DIV MOD
|
||||
%left MUL DIV MOD ATAN2
|
||||
%right POW
|
||||
|
||||
// Offset modifiers do not have associativity.
|
||||
|
@ -237,6 +238,7 @@ aggregate_modifier:
|
|||
|
||||
// Operator precedence only works if each of those is listed separately.
|
||||
binary_expr : expr ADD bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
|
||||
| expr ATAN2 bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
|
||||
| expr DIV bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
|
||||
| expr EQLC bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
|
||||
| expr GTE bin_modifier expr { $$ = yylex.(*parser).newBinaryExpression($1, $2, $3, $4) }
|
||||
|
@ -674,7 +676,7 @@ series_value : IDENTIFIER
|
|||
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;
|
||||
|
||||
// Inside grouping options, label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
|
||||
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END;
|
||||
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2;
|
||||
|
||||
unary_op : ADD | SUB;
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -97,6 +97,7 @@ var key = map[string]ItemType{
|
|||
"and": LAND,
|
||||
"or": LOR,
|
||||
"unless": LUNLESS,
|
||||
"atan2": ATAN2,
|
||||
|
||||
// Aggregators.
|
||||
"sum": SUM,
|
||||
|
|
|
@ -340,6 +340,10 @@ var tests = []struct {
|
|||
input: "bool",
|
||||
expected: []Item{{BOOL, 0, "bool"}},
|
||||
},
|
||||
{
|
||||
input: "atan2",
|
||||
expected: []Item{{ATAN2, 0, "atan2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -737,13 +741,13 @@ func TestLexer(t *testing.T) {
|
|||
}
|
||||
if !hasError {
|
||||
t.Logf("%d: input %q", i, test.input)
|
||||
t.Fatalf("expected lexing error but did not fail")
|
||||
require.Fail(t, "expected lexing error but did not fail")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if lastItem.Typ == ERROR {
|
||||
t.Logf("%d: input %q", i, test.input)
|
||||
t.Fatalf("unexpected lexing error at position %d: %s", lastItem.Pos, lastItem)
|
||||
require.Fail(t, "unexpected lexing error at position %d: %s", lastItem.Pos, lastItem)
|
||||
}
|
||||
|
||||
eofItem := Item{EOF, Pos(len(test.input)), ""}
|
||||
|
|
|
@ -55,18 +55,16 @@ func TestQueryLogging(t *testing.T) {
|
|||
queryLogger.Insert(context.Background(), queries[i])
|
||||
|
||||
have := string(fileAsBytes[start:end])
|
||||
if !regexp.MustCompile(want[i]).MatchString(have) {
|
||||
t.Fatalf("Query not written correctly: %s.\nHave %s\nWant %s", queries[i], have, want[i])
|
||||
}
|
||||
require.True(t, regexp.MustCompile(want[i]).MatchString(have),
|
||||
"Query not written correctly: %s", queries[i])
|
||||
}
|
||||
|
||||
// Check if all queries have been deleted.
|
||||
for i := 0; i < 4; i++ {
|
||||
queryLogger.Delete(1 + i*entrySize)
|
||||
}
|
||||
if !regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1 : 1+entrySize*4]) {
|
||||
t.Fatalf("All queries not deleted properly. Have %s\nWant only null bytes \\x00", string(fileAsBytes[1:1+entrySize*4]))
|
||||
}
|
||||
require.True(t, regexp.MustCompile(`^\x00+$`).Match(fileAsBytes[1:1+entrySize*4]),
|
||||
"All queries not deleted properly. Want only null bytes \\x00")
|
||||
}
|
||||
|
||||
func TestIndexReuse(t *testing.T) {
|
||||
|
@ -101,9 +99,8 @@ func TestIndexReuse(t *testing.T) {
|
|||
end := start + entrySize
|
||||
|
||||
have := queryBytes[start:end]
|
||||
if !regexp.MustCompile(want[i]).Match(have) {
|
||||
t.Fatalf("Index not reused properly:\nHave %s\nWant %s", string(queryBytes[start:end]), want[i])
|
||||
}
|
||||
require.True(t, regexp.MustCompile(want[i]).Match(have),
|
||||
"Index not reused properly.")
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -124,14 +121,10 @@ func TestMMapFile(t *testing.T) {
|
|||
|
||||
bytes := make([]byte, 4)
|
||||
n, err := f.Read(bytes)
|
||||
require.Equal(t, n, 2)
|
||||
require.NoError(t, err, "Unexpected error while reading file.")
|
||||
|
||||
if n != 2 || err != nil {
|
||||
t.Fatalf("Error reading file")
|
||||
}
|
||||
|
||||
if string(bytes[:2]) != string(fileAsBytes) {
|
||||
t.Fatalf("Mmap failed")
|
||||
}
|
||||
require.Equal(t, fileAsBytes, bytes[:2], "Mmap failed")
|
||||
}
|
||||
|
||||
func TestParseBrokenJSON(t *testing.T) {
|
||||
|
@ -163,12 +156,9 @@ func TestParseBrokenJSON(t *testing.T) {
|
|||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
out, ok := parseBrokenJSON(tc.b)
|
||||
if tc.ok != ok {
|
||||
t.Fatalf("expected %t, got %t", tc.ok, ok)
|
||||
return
|
||||
}
|
||||
if ok && tc.out != out {
|
||||
t.Fatalf("expected %s, got %s", tc.out, out)
|
||||
require.Equal(t, tc.ok, ok)
|
||||
if ok {
|
||||
require.Equal(t, tc.out, out)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/exemplar"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
|
@ -597,9 +598,8 @@ func (t *Test) exec(tc testCommand) error {
|
|||
// clear the current test storage of all inserted samples.
|
||||
func (t *Test) clear() {
|
||||
if t.storage != nil {
|
||||
if err := t.storage.Close(); err != nil {
|
||||
t.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
err := t.storage.Close()
|
||||
require.NoError(t.T, err, "Unexpected error while closing test storage.")
|
||||
}
|
||||
if t.cancelCtx != nil {
|
||||
t.cancelCtx()
|
||||
|
@ -623,9 +623,8 @@ func (t *Test) clear() {
|
|||
func (t *Test) Close() {
|
||||
t.cancelCtx()
|
||||
|
||||
if err := t.storage.Close(); err != nil {
|
||||
t.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
err := t.storage.Close()
|
||||
require.NoError(t.T, err, "Unexpected error while closing test storage.")
|
||||
}
|
||||
|
||||
// samplesAlmostEqual returns true if the two sample lines only differ by a
|
||||
|
@ -722,9 +721,8 @@ func (ll *LazyLoader) parse(input string) error {
|
|||
// clear the current test storage of all inserted samples.
|
||||
func (ll *LazyLoader) clear() {
|
||||
if ll.storage != nil {
|
||||
if err := ll.storage.Close(); err != nil {
|
||||
ll.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
err := ll.storage.Close()
|
||||
require.NoError(ll.T, err, "Unexpected error while closing test storage.")
|
||||
}
|
||||
if ll.cancelCtx != nil {
|
||||
ll.cancelCtx()
|
||||
|
@ -798,8 +796,6 @@ func (ll *LazyLoader) Storage() storage.Storage {
|
|||
// Close closes resources associated with the LazyLoader.
|
||||
func (ll *LazyLoader) Close() {
|
||||
ll.cancelCtx()
|
||||
|
||||
if err := ll.storage.Close(); err != nil {
|
||||
ll.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
err := ll.storage.Close()
|
||||
require.NoError(ll.T, err, "Unexpected error while closing test storage.")
|
||||
}
|
||||
|
|
|
@ -467,3 +467,17 @@ eval instant at 5m test_total < bool test_smaller
|
|||
{instance="localhost"} 0
|
||||
|
||||
eval instant at 5m test_total < test_smaller
|
||||
|
||||
clear
|
||||
|
||||
# Testing atan2.
|
||||
load 5m
|
||||
trigy{} 10
|
||||
trigx{} 20
|
||||
trigNaN{} NaN
|
||||
|
||||
eval instant at 5m trigy atan2 trigx
|
||||
trigy{} 0.4636476090008061
|
||||
|
||||
eval instant at 5m trigy atan2 trigNaN
|
||||
trigy{} NaN
|
||||
|
|
|
@ -0,0 +1,101 @@
|
|||
# Testing sin() cos() tan() asin() acos() atan() sinh() cosh() tanh() asinh() acosh() atanh() rad() deg() pi().
|
||||
|
||||
load 5m
|
||||
trig{l="x"} 10
|
||||
trig{l="y"} 20
|
||||
trig{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m sin(trig)
|
||||
{l="x"} -0.5440211108893699
|
||||
{l="y"} 0.9129452507276277
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m cos(trig)
|
||||
{l="x"} -0.8390715290764524
|
||||
{l="y"} 0.40808206181339196
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m tan(trig)
|
||||
{l="x"} 0.6483608274590867
|
||||
{l="y"} 2.2371609442247427
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m asin(trig - 10.1)
|
||||
{l="x"} -0.10016742116155944
|
||||
{l="y"} NaN
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m acos(trig - 10.1)
|
||||
{l="x"} 1.670963747956456
|
||||
{l="y"} NaN
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m atan(trig)
|
||||
{l="x"} 1.4711276743037345
|
||||
{l="y"} 1.5208379310729538
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m sinh(trig)
|
||||
{l="x"} 11013.232920103324
|
||||
{l="y"} 2.4258259770489514e+08
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m cosh(trig)
|
||||
{l="x"} 11013.232920103324
|
||||
{l="y"} 2.4258259770489514e+08
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m tanh(trig)
|
||||
{l="x"} 0.9999999958776927
|
||||
{l="y"} 1
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m asinh(trig)
|
||||
{l="x"} 2.99822295029797
|
||||
{l="y"} 3.6895038689889055
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m acosh(trig)
|
||||
{l="x"} 2.993222846126381
|
||||
{l="y"} 3.6882538673612966
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m atanh(trig - 10.1)
|
||||
{l="x"} -0.10033534773107522
|
||||
{l="y"} NaN
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m rad(trig)
|
||||
{l="x"} 0.17453292519943295
|
||||
{l="y"} 0.3490658503988659
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m rad(trig - 10)
|
||||
{l="x"} 0
|
||||
{l="y"} 0.17453292519943295
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m rad(trig - 20)
|
||||
{l="x"} -0.17453292519943295
|
||||
{l="y"} 0
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m deg(trig)
|
||||
{l="x"} 572.9577951308232
|
||||
{l="y"} 1145.9155902616465
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m deg(trig - 10)
|
||||
{l="x"} 0
|
||||
{l="y"} 572.9577951308232
|
||||
{l="NaN"} NaN
|
||||
|
||||
eval instant at 5m deg(trig - 20)
|
||||
{l="x"} -572.9577951308232
|
||||
{l="y"} 0
|
||||
{l="NaN"} NaN
|
||||
|
||||
clear
|
||||
|
||||
eval instant at 0s pi()
|
||||
3.141592653589793
|
|
@ -297,7 +297,7 @@ const resolvedRetention = 15 * time.Minute
|
|||
|
||||
// Eval evaluates the rule expression and then creates pending alerts and fires
|
||||
// or removes previously pending alerts accordingly.
|
||||
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL) (promql.Vector, error) {
|
||||
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
|
||||
res, err := query(ctx, r.vector.String(), ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -338,6 +338,7 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
|
|||
model.Time(timestamp.FromTime(ts)),
|
||||
template.QueryFunc(query),
|
||||
externalURL,
|
||||
nil,
|
||||
)
|
||||
result, err := tmpl.Expand()
|
||||
if err != nil {
|
||||
|
@ -414,6 +415,12 @@ func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc,
|
|||
}
|
||||
}
|
||||
|
||||
numActive := len(r.active)
|
||||
if limit != 0 && numActive > limit {
|
||||
r.active = map[uint64]*Alert{}
|
||||
return nil, errors.Errorf("exceeded limit of %d with %d alerts", limit, numActive)
|
||||
}
|
||||
|
||||
return vec, nil
|
||||
}
|
||||
|
||||
|
|
|
@@ -170,7 +170,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) {
 		t.Logf("case %d", i)
 		evalTime := baseTime.Add(time.Duration(i) * time.Minute)
 		result[0].Point.T = timestamp.FromTime(evalTime)
-		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
+		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
 		require.NoError(t, err)

 		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@@ -252,7 +252,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {

 	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
 	res, err := ruleWithoutExternalLabels.Eval(
-		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {
@@ -266,7 +266,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
 	}

 	res, err = ruleWithExternalLabels.Eval(
-		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {
@@ -346,7 +346,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {

 	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
 	res, err := ruleWithoutExternalURL.Eval(
-		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {
@@ -360,7 +360,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
 	}

 	res, err = ruleWithExternalURL.Eval(
-		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {
@@ -417,7 +417,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {

 	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
 	res, err := rule.Eval(
-		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+		suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0,
 	)
 	require.NoError(t, err)
 	for _, smpl := range res {
@@ -460,7 +460,61 @@ func TestAlertingRuleDuplicate(t *testing.T) {
 		"",
 		true, log.NewNopLogger(),
 	)
-	_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
+	_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0)
 	require.Error(t, err)
 	require.EqualError(t, err, "vector contains metrics with the same labelset after applying alert labels")
 }
+
+func TestAlertingRuleLimit(t *testing.T) {
+	storage := teststorage.New(t)
+	defer storage.Close()
+
+	opts := promql.EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10,
+		Timeout:    10 * time.Second,
+	}
+
+	engine := promql.NewEngine(opts)
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	defer cancelCtx()
+
+	now := time.Now()
+
+	suite := []struct {
+		limit int
+		err   string
+	}{
+		{
+			limit: 0,
+		},
+		{
+			limit: 1,
+		},
+		{
+			limit: -1,
+			err:   "exceeded limit of -1 with 1 alerts",
+		},
+	}
+
+	for _, test := range suite {
+		expr, _ := parser.ParseExpr(`1`)
+		rule := NewAlertingRule(
+			"foo",
+			expr,
+			time.Minute,
+			labels.FromStrings("test", "test"),
+			nil,
+			nil,
+			"",
+			true, log.NewNopLogger(),
+		)
+		_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, test.limit)
+		if test.err == "" {
+			require.NoError(t, err)
+		} else {
+			require.Equal(t, test.err, err.Error())
+		}
+	}
+}

@@ -213,7 +213,7 @@ type Rule interface {
 	// Labels of the rule.
 	Labels() labels.Labels
 	// eval evaluates the rule, including any associated recording or alerting actions.
-	Eval(context.Context, time.Time, QueryFunc, *url.URL) (promql.Vector, error)
+	Eval(context.Context, time.Time, QueryFunc, *url.URL, int) (promql.Vector, error)
 	// String returns a human-readable string representation of the rule.
 	String() string
 	// Query returns the rule query expression.
@@ -244,6 +244,7 @@ type Group struct {
 	name                 string
 	file                 string
 	interval             time.Duration
+	limit                int
 	rules                []Rule
 	seriesInPreviousEval []map[string]labels.Labels // One per Rule.
 	staleSeries          []labels.Labels
@@ -267,6 +268,7 @@ type Group struct {
 type GroupOptions struct {
 	Name, File    string
 	Interval      time.Duration
+	Limit         int
 	Rules         []Rule
 	ShouldRestore bool
 	Opts          *ManagerOptions
@@ -295,6 +297,7 @@ func NewGroup(o GroupOptions) *Group {
 		name:                 o.Name,
 		file:                 o.File,
 		interval:             o.Interval,
+		limit:                o.Limit,
 		rules:                o.Rules,
 		shouldRestore:        o.ShouldRestore,
 		opts:                 o.Opts,
@@ -319,6 +322,9 @@ func (g *Group) Rules() []Rule { return g.rules }
 // Interval returns the group's interval.
 func (g *Group) Interval() time.Duration { return g.interval }

+// Limit returns the group's limit.
+func (g *Group) Limit() int { return g.limit }
+
 func (g *Group) run(ctx context.Context) {
 	defer close(g.terminated)

@@ -591,7 +597,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {

 			g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

-			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
+			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
 			if err != nil {
 				rule.SetHealth(HealthBad)
 				rule.SetLastError(err)
@@ -850,6 +856,10 @@ func (g *Group) Equals(ng *Group) bool {
 		return false
 	}

+	if g.limit != ng.limit {
+		return false
+	}
+
 	if len(g.rules) != len(ng.rules) {
 		return false
 	}
@@ -1086,6 +1096,7 @@ func (m *Manager) LoadGroups(
 			Name:          rg.Name,
 			File:          fn,
 			Interval:      itv,
+			Limit:         rg.Limit,
 			Rules:         rules,
 			ShouldRestore: shouldRestore,
 			Opts:          m.opts,

@@ -156,7 +156,7 @@ func TestAlertingRule(t *testing.T) {

 		evalTime := baseTime.Add(test.time)

-		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
+		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
 		require.NoError(t, err)

 		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
@@ -305,7 +305,7 @@ func TestForStateAddSamples(t *testing.T) {
 			forState = float64(value.StaleNaN)
 		}

-		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
+		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil, 0)
 		require.NoError(t, err)

 		var filteredRes promql.Vector // After removing 'ALERTS' samples.
@@ -773,6 +773,12 @@ func TestUpdate(t *testing.T) {
 	}
 	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)

+	// Update limit and reload.
+	for i := range rgs.Groups {
+		rgs.Groups[i].Limit = 1
+	}
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+
 	// Change group rules and reload.
 	for i, g := range rgs.Groups {
 		for j, r := range g.Rules {
@@ -791,6 +797,7 @@ type ruleGroupsTest struct {
 type ruleGroupTest struct {
 	Name     string         `yaml:"name"`
 	Interval model.Duration `yaml:"interval,omitempty"`
+	Limit    int            `yaml:"limit,omitempty"`
 	Rules    []rulefmt.Rule `yaml:"rules"`
 }

@@ -812,6 +819,7 @@ func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
 		tmp = append(tmp, ruleGroupTest{
 			Name:     g.Name,
 			Interval: g.Interval,
+			Limit:    g.Limit,
 			Rules:    rtmp,
 		})
 	}

@@ -73,7 +73,7 @@ func (rule *RecordingRule) Labels() labels.Labels {
 }

 // Eval evaluates the rule and then overrides the metric names and labels accordingly.
-func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL) (promql.Vector, error) {
+func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) {
 	vector, err := query(ctx, rule.vector.String(), ts)
 	if err != nil {
 		return nil, err
@@ -99,6 +99,13 @@ func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFu
 		return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
 	}

+	numSamples := len(vector)
+	if limit != 0 && numSamples > limit {
+		return nil, fmt.Errorf("exceeded limit %d with %d samples", limit, numSamples)
+	}
+
 	rule.SetHealth(HealthGood)
 	rule.SetLastError(err)
 	return vector, nil
 }

@@ -49,7 +49,9 @@ func TestRuleEval(t *testing.T) {
 		name   string
 		expr   parser.Expr
 		labels labels.Labels
+		limit  int
 		result promql.Vector
+		err    string
 	}{
 		{
 			name: "nolabels",
@@ -69,12 +71,43 @@ func TestRuleEval(t *testing.T) {
 				Point:  promql.Point{V: 1, T: timestamp.FromTime(now)},
 			}},
 		},
+		{
+			name:   "underlimit",
+			expr:   &parser.NumberLiteral{Val: 1},
+			labels: labels.FromStrings("foo", "bar"),
+			limit:  2,
+			result: promql.Vector{promql.Sample{
+				Metric: labels.FromStrings("__name__", "underlimit", "foo", "bar"),
+				Point:  promql.Point{V: 1, T: timestamp.FromTime(now)},
+			}},
+		},
+		{
+			name:   "atlimit",
+			expr:   &parser.NumberLiteral{Val: 1},
+			labels: labels.FromStrings("foo", "bar"),
+			limit:  1,
+			result: promql.Vector{promql.Sample{
+				Metric: labels.FromStrings("__name__", "atlimit", "foo", "bar"),
+				Point:  promql.Point{V: 1, T: timestamp.FromTime(now)},
+			}},
+		},
+		{
+			name:   "overlimit",
+			expr:   &parser.NumberLiteral{Val: 1},
+			labels: labels.FromStrings("foo", "bar"),
+			limit:  -1,
+			err:    "exceeded limit -1 with 1 samples",
+		},
 	}

 	for _, test := range suite {
 		rule := NewRecordingRule(test.name, test.expr, test.labels)
-		result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
-		require.NoError(t, err)
+		result, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, test.limit)
+		if test.err == "" {
+			require.NoError(t, err)
+		} else {
+			require.Equal(t, test.err, err.Error())
+		}
 		require.Equal(t, test.result, result)
 	}
 }
@@ -114,7 +147,7 @@ func TestRuleEvalDuplicate(t *testing.T) {

 	expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
 	rule := NewRecordingRule("foo", expr, labels.FromStrings("test", "test"))
-	_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil)
+	_, err := rule.Eval(ctx, now, EngineQueryFunc(engine, storage), nil, 0)
 	require.Error(t, err)
 	require.EqualError(t, err, "vector contains metrics with the same labelset after applying rule labels")
 }

@@ -269,7 +269,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
 		logger = log.NewNopLogger()
 	}

-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, config_util.WithHTTP2Disabled())
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
 	if err != nil {
 		targetScrapePoolsFailed.Inc()
 		return nil, errors.Wrap(err, "error creating HTTP client")
@@ -380,7 +380,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 	targetScrapePoolReloads.Inc()
 	start := time.Now()

-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, config_util.WithHTTP2Disabled())
+	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName)
 	if err != nil {
 		targetScrapePoolReloadsFailed.Inc()
 		return errors.Wrap(err, "error creating HTTP client")
@@ -721,7 +721,7 @@ var errBodySizeLimit = errors.New("body size limit exceeded")

 const acceptHeader = `application/openmetrics-text; version=0.0.1,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`

-var userAgentHeader = fmt.Sprintf("Prometheus/%s", version.Version)
+var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

 func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error) {
 	if s.req == nil {
@@ -731,7 +731,7 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
 		}
 		req.Header.Add("Accept", acceptHeader)
 		req.Header.Add("Accept-Encoding", "gzip")
-		req.Header.Set("User-Agent", userAgentHeader)
+		req.Header.Set("User-Agent", UserAgent)
 		req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))

 		s.req = req

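Note (not part of the diff): two things change in the scrape client above. Dropping config_util.WithHTTP2Disabled leaves the HTTP client on prometheus/common's defaults, and renaming userAgentHeader to UserAgent exports the Prometheus User-Agent string for reuse by other packages. A minimal sketch of reusing the exported value, with a hypothetical helper name:

	// newScrapeLikeRequest is a hypothetical helper that builds a GET request
	// carrying the exported scrape.UserAgent string.
	func newScrapeLikeRequest(url string) (*http.Request, error) {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("User-Agent", scrape.UserAgent)
		return req, nil
	}
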
@@ -149,7 +149,7 @@ func TestNewHTTPBearerToken(t *testing.T) {
 	cfg := config_util.HTTPClientConfig{
 		BearerToken: "1234",
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -176,7 +176,7 @@ func TestNewHTTPBearerTokenFile(t *testing.T) {
 	cfg := config_util.HTTPClientConfig{
 		BearerTokenFile: "testdata/bearertoken.txt",
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -205,7 +205,7 @@ func TestNewHTTPBasicAuth(t *testing.T) {
 			Password: "password123",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -233,7 +233,7 @@ func TestNewHTTPCACert(t *testing.T) {
 			CAFile: caCertPath,
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -266,7 +266,7 @@ func TestNewHTTPClientCert(t *testing.T) {
 			KeyFile:  "testdata/client.key",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -295,7 +295,7 @@ func TestNewHTTPWithServerName(t *testing.T) {
 			ServerName: "prometheus.rocks",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -324,7 +324,7 @@ func TestNewHTTPWithBadServerName(t *testing.T) {
 			ServerName: "badname",
 		},
 	}
-	c, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	c, err := config_util.NewClientFromConfig(cfg, "test")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -362,7 +362,7 @@ func TestNewClientWithBadTLSConfig(t *testing.T) {
 			KeyFile:  "testdata/nonexistent_client.key",
 		},
 	}
-	_, err := config_util.NewClientFromConfig(cfg, "test", config_util.WithHTTP2Disabled())
+	_, err := config_util.NewClientFromConfig(cfg, "test")
 	if err == nil {
 		t.Fatalf("Expected error, got nil.")
 	}

@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-#
-# Build React web UI.
-# Run from repository root.
-set -e
-set -u
-
-if ! [[ "$0" =~ "scripts/build_react_app.sh" ]]; then
-  echo "must be run from repository root"
-  exit 255
-fi
-
-cd web/ui/react-app
-
-echo "building React app"
-PUBLIC_URL=. npm run build
-rm -rf ../static/react
-mv build ../static/react

@@ -0,0 +1,135 @@
+#!/usr/bin/env bash
+# vim: ts=2 et
+# Setting -x is absolutely forbidden as it could leak the GitHub token.
+set -uo pipefail
+
+# GITHUB_TOKEN required scope: repo.repo_public
+
+git_mail="prometheus-team@googlegroups.com"
+git_user="prombot"
+branch="repo_sync_codemirror"
+commit_msg="Update codemirror"
+pr_title="Synchronize codemirror from prometheus/prometheus"
+pr_msg="Propagating changes from prometheus/prometheus default branch."
+target_repo="prometheus-community/codemirror-promql"
+source_path="web/ui/module/codemirror-promql"
+
+color_red='\e[31m'
+color_green='\e[32m'
+color_yellow='\e[33m'
+color_none='\e[0m'
+
+echo_red() {
+  echo -e "${color_red}$@${color_none}" 1>&2
+}
+
+echo_green() {
+  echo -e "${color_green}$@${color_none}" 1>&2
+}
+
+echo_yellow() {
+  echo -e "${color_yellow}$@${color_none}" 1>&2
+}
+
+GITHUB_TOKEN="${GITHUB_TOKEN:-}"
+if [ -z "${GITHUB_TOKEN}" ]; then
+  echo_red 'GitHub token (GITHUB_TOKEN) not set. Terminating.'
+  exit 1
+fi
+
+# List of files that should not be synced.
+excluded_files="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint MAINTAINERS.md"
+excluded_dirs=".github .circleci"
+
+# Go to the root of the repo
+cd "$(git rev-parse --show-cdup)" || exit 1
+
+source_dir="$(pwd)/${source_path}"
+
+tmp_dir="$(mktemp -d)"
+trap 'rm -rf "${tmp_dir}"' EXIT
+
+## Internal functions
+github_api() {
+  local url
+  url="https://api.github.com/${1}"
+  shift 1
+  curl --retry 5 --silent --fail -u "${git_user}:${GITHUB_TOKEN}" "${url}" "$@"
+}
+
+get_default_branch() {
+  github_api "repos/${1}" 2> /dev/null |
+    jq -r .default_branch
+}
+
+push_branch() {
+  local git_url
+  git_url="https://${git_user}:${GITHUB_TOKEN}@github.com/${1}"
+  # stdout and stderr are redirected to /dev/null otherwise git-push could leak
+  # the token in the logs.
+  # Delete the remote branch in case it was merged but not deleted.
+  git push --quiet "${git_url}" ":${branch}" 1>/dev/null 2>&1
+  git push --quiet "${git_url}" --set-upstream "${branch}" 1>/dev/null 2>&1
+}
+
+post_pull_request() {
+  local repo="$1"
+  local default_branch="$2"
+  local post_json
+  post_json="$(printf '{"title":"%s","base":"%s","head":"%s","body":"%s"}' "${pr_title}" "${default_branch}" "${branch}" "${pr_msg}")"
+  echo "Posting PR to ${default_branch} on ${repo}"
+  github_api "repos/${repo}/pulls" --data "${post_json}" --show-error |
+    jq -r '"PR URL " + .html_url'
+}
+
+process_repo() {
+  local org_repo
+  local default_branch
+  org_repo="$1"
+  mkdir -p "${tmp_dir}/${org_repo}"
+  echo_green "Processing '${org_repo}'"
+
+  default_branch="$(get_default_branch "${org_repo}")"
+  if [[ -z "${default_branch}" ]]; then
+    echo "Can't get the default branch."
+    return
+  fi
+  echo "Default branch: ${default_branch}"
+
+  # Clone target repo to temporary directory and checkout to new branch
+  git clone --quiet "https://github.com/${org_repo}.git" "${tmp_dir}/${org_repo}"
+  cd "${tmp_dir}/${org_repo}" || return 1
+  git checkout -b "${branch}" || return 1
+
+  git rm -r .
+
+  cp -ra ${source_dir}/. .
+  git add .
+
+  for excluded_dir in ${excluded_dirs}; do
+    git reset -- "${excluded_dir}/*"
+    git checkout -- "${excluded_dir}/*"
+  done
+
+  for excluded_file in ${excluded_files}; do
+    git reset -- "${excluded_file}"
+    git checkout -- "${excluded_file}"
+  done
+
+  if [[ -n "$(git status --porcelain)" ]]; then
+    git config user.email "${git_mail}"
+    git config user.name "${git_user}"
+    git add .
+    git commit -s -m "${commit_msg}"
+    if push_branch "${org_repo}"; then
+      if ! post_pull_request "${org_repo}" "${default_branch}"; then
+        return 1
+      fi
+    else
+      echo "Pushing ${branch} to ${org_repo} failed"
+      return 1
+    fi
+  fi
+}
+
+process_repo ${target_repo}

@@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
 fi

 # List of files that should be synced.
-SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint"
+SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint .github/workflows/golangci-lint.yml"

 # Go to the root of the repo
 cd "$(git rev-parse --show-cdup)" || exit 1
@@ -96,6 +96,15 @@ check_license() {
   echo "$1" | grep --quiet --no-messages --ignore-case 'Apache License'
 }

+check_go() {
+  local org_repo
+  local default_branch
+  org_repo="$1"
+  default_branch="$2"
+
+  curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/go.mod"
+}
+
 check_circleci_orb() {
   local org_repo
   local default_branch
@@ -136,10 +145,14 @@ process_repo() {
       echo "LICENSE in ${org_repo} is not apache, skipping."
       continue
     fi
+    if [[ "${source_file}" == '.github/workflows/golangci-lint.yml' ]] && ! check_go "${org_repo}" "${default_branch}" ; then
+      echo "${org_repo} is not Go, skipping .github/workflows/golangci-lint.yml."
+      continue
+    fi
     if [[ -z "${target_file}" ]]; then
       echo "${source_file} doesn't exist in ${org_repo}"
       case "${source_file}" in
-        CODE_OF_CONDUCT.md | SECURITY.md)
+        CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/golangci-lint.yml)
          echo "${source_file} missing in ${org_repo}, force updating."
          needs_update+=("${source_file}")
          ;;
@@ -172,6 +185,9 @@ process_repo() {
   cd "${tmp_dir}/${org_repo}" || return 1
   git checkout -b "${branch}" || return 1

+  # If we need to add an Actions file this directory needs to be present.
+  mkdir -p "./.github/workflows"
+
   # Update the files in target repo by one from prometheus/prometheus.
   for source_file in "${needs_update[@]}"; do
     case "${source_file}" in

@@ -18,7 +18,6 @@ import (
 	"container/heap"
 	"math"
 	"sort"
-	"strings"
 	"sync"

 	"github.com/pkg/errors"
@@ -197,15 +196,13 @@ func mergeStrings(a, b []string) []string {
 	res := make([]string, 0, maxl*10/9)

 	for len(a) > 0 && len(b) > 0 {
-		d := strings.Compare(a[0], b[0])
-
-		if d == 0 {
+		if a[0] == b[0] {
 			res = append(res, a[0])
 			a, b = a[1:], b[1:]
-		} else if d < 0 {
+		} else if a[0] < b[0] {
 			res = append(res, a[0])
 			a = a[1:]
-		} else if d > 0 {
+		} else {
 			res = append(res, b[0])
 			b = b[1:]
 		}

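Note (not part of the diff): the rewrite above trades strings.Compare for the built-in comparison operators; the standard library's own documentation for strings.Compare recommends the operators as clearer, and they avoid a function call. The mapping is mechanical:

	// Three-way strings.Compare results map onto the built-in operators:
	//   strings.Compare(a, b) == 0  <=>  a == b
	//   strings.Compare(a, b) <  0  <=>  a < b
	//   strings.Compare(a, b) >  0  <=>  a > b
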
@@ -110,7 +110,7 @@ type ReadClient interface {

 // NewReadClient creates a new client for remote read.
 func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
-	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client", config_util.WithHTTP2Disabled())
+	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client")
 	if err != nil {
 		return nil, err
 	}
@@ -136,7 +136,7 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {

 // NewWriteClient creates a new client for remote write.
 func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
-	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client", config_util.WithHTTP2Disabled())
+	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client")
 	if err != nil {
 		return nil, err
 	}

@@ -111,9 +111,8 @@ func TestClientRetryAfter(t *testing.T) {

 	c := getClient(conf)
 	err = c.Store(context.Background(), []byte{})
-	if _, ok := err.(RecoverableError); ok {
-		t.Fatal("recoverable error not expected")
-	}
+	_, ok := err.(RecoverableError)
+	require.False(t, ok, "Recoverable error not expected.")

 	conf = &ClientConfig{
 		URL: &config_util.URL{URL: serverURL},
@@ -123,9 +122,8 @@ func TestClientRetryAfter(t *testing.T) {

 	c = getClient(conf)
 	err = c.Store(context.Background(), []byte{})
-	if _, ok := err.(RecoverableError); !ok {
-		t.Fatal("recoverable error was expected")
-	}
+	_, ok = err.(RecoverableError)
+	require.True(t, ok, "Recoverable error was expected.")
 }

 func TestRetryAfterDuration(t *testing.T) {

@@ -26,6 +26,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"

+	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/pkg/textparse"
 	"github.com/prometheus/prometheus/prompb"
@@ -450,6 +451,17 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
 	return result, nil
 }

+func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar {
+	timestamp := ep.Timestamp
+
+	return exemplar.Exemplar{
+		Labels: labelProtosToLabels(ep.Labels),
+		Value:  ep.Value,
+		Ts:     timestamp,
+		HasTs:  timestamp != 0,
+	}
+}
+
 // LabelProtosToMetric unpack a []*prompb.Label to a model.Metric
 func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
 	metric := make(model.Metric, len(labelPairs))

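Note (not part of the diff): one subtlety in exemplarProtoToExemplar above is that protobuf encodes an unset Timestamp as 0, so the conversion treats a zero timestamp as "no timestamp" (HasTs is false). A small illustrative sketch under that assumption, with a hypothetical label:

	// An exemplar whose Timestamp was never set decodes with Timestamp == 0 ...
	ep := prompb.Exemplar{Labels: []prompb.Label{{Name: "trace_id", Value: "abc"}}, Value: 1}
	e := exemplarProtoToExemplar(ep)
	// ... so e.HasTs is false and e.Ts is 0: the exemplar carries no explicit timestamp.
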
@@ -36,7 +36,8 @@ var writeRequestFixture = &prompb.WriteRequest{
 				{Name: "d", Value: "e"},
 				{Name: "foo", Value: "bar"},
 			},
-			Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
+			Samples:   []prompb.Sample{{Value: 1, Timestamp: 0}},
+			Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
 		},
 		{
 			Labels: []prompb.Label{
@@ -46,7 +47,8 @@ var writeRequestFixture = &prompb.WriteRequest{
 				{Name: "d", Value: "e"},
 				{Name: "foo", Value: "bar"},
 			},
-			Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
+			Samples:   []prompb.Sample{{Value: 2, Timestamp: 1}},
+			Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
 		},
 	},
 }

@@ -78,9 +78,7 @@ func TestSampledReadEndpoint(t *testing.T) {
 	recorder := httptest.NewRecorder()
 	h.ServeHTTP(recorder, request)

-	if recorder.Code/100 != 2 {
-		t.Fatal(recorder.Code)
-	}
+	require.Equal(t, 2, recorder.Code/100)

 	require.Equal(t, "application/x-protobuf", recorder.Result().Header.Get("Content-Type"))
 	require.Equal(t, "snappy", recorder.Result().Header.Get("Content-Encoding"))
@@ -96,9 +94,7 @@ func TestSampledReadEndpoint(t *testing.T) {
 	err = proto.Unmarshal(uncompressed, &resp)
 	require.NoError(t, err)

-	if len(resp.Results) != 1 {
-		t.Fatalf("Expected 1 result, got %d", len(resp.Results))
-	}
+	require.Equal(t, 1, len(resp.Results), "Expected 1 result.")

 	require.Equal(t, &prompb.QueryResult{
 		Timeseries: []*prompb.TimeSeries{
@@ -189,9 +185,7 @@ func TestStreamReadEndpoint(t *testing.T) {
 	recorder := httptest.NewRecorder()
 	api.ServeHTTP(recorder, request)

-	if recorder.Code/100 != 2 {
-		t.Fatal(recorder.Code)
-	}
+	require.Equal(t, 2, recorder.Code/100)

 	require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
 	require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))
@@ -208,9 +202,7 @@ func TestStreamReadEndpoint(t *testing.T) {
 		results = append(results, res)
 	}

-	if len(results) != 5 {
-		t.Fatalf("Expected 5 result, got %d", len(results))
-	}
+	require.Equal(t, 5, len(results), "Expected 5 results.")

 	require.Equal(t, []*prompb.ChunkedReadResponse{
 		{

@@ -15,10 +15,14 @@ package remote

 import (
 	"context"
+	"fmt"
 	"net/http"

 	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"

+	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
 )
@@ -62,16 +66,35 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNoContent)
 }

+// checkAppendExemplarError modifies the AppendExamplar's returned error based on the error cause.
+func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
+	switch errors.Cause(err) {
+	case storage.ErrNotFound:
+		return storage.ErrNotFound
+	case storage.ErrOutOfOrderExemplar:
+		*outOfOrderErrs++
+		level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
+		return nil
+	default:
+		return err
+	}
+}
+
 func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
+	var (
+		outOfOrderExemplarErrs = 0
+	)
+
 	app := h.appendable.Appender(ctx)
 	defer func() {
 		if err != nil {
-			app.Rollback()
+			_ = app.Rollback()
 			return
 		}
 		err = app.Commit()
 	}()

+	var exemplarErr error
 	for _, ts := range req.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
 		for _, s := range ts.Samples {
@@ -79,7 +102,23 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
 			if err != nil {
 				return err
 			}

 		}
+
+		for _, ep := range ts.Exemplars {
+			e := exemplarProtoToExemplar(ep)
+
+			_, exemplarErr = app.AppendExemplar(0, labels, e)
+			exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
+			if exemplarErr != nil {
+				// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
+				level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
+			}
+		}
 	}

+	if outOfOrderExemplarErrs > 0 {
+		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
+	}
+
 	return nil

@@ -23,11 +23,12 @@ import (
 	"testing"

 	"github.com/go-kit/log"
+	"github.com/stretchr/testify/require"

+	"github.com/prometheus/prometheus/pkg/exemplar"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage"
-	"github.com/stretchr/testify/require"
 )

 func TestRemoteWriteHandler(t *testing.T) {
@@ -47,16 +48,23 @@ func TestRemoteWriteHandler(t *testing.T) {
 	require.Equal(t, http.StatusNoContent, resp.StatusCode)

 	i := 0
+	j := 0
 	for _, ts := range writeRequestFixture.Timeseries {
 		labels := labelProtosToLabels(ts.Labels)
 		for _, s := range ts.Samples {
 			require.Equal(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
 			i++
 		}
+
+		for _, e := range ts.Exemplars {
+			exemplarLabels := labelProtosToLabels(e.Labels)
+			require.Equal(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
+			j++
+		}
 	}
 }

-func TestOutOfOrder(t *testing.T) {
+func TestOutOfOrderSample(t *testing.T) {
 	buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
 		Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}},
 		Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
@@ -67,7 +75,7 @@ func TestOutOfOrder(t *testing.T) {
 	require.NoError(t, err)

 	appendable := &mockAppendable{
-		latest: 100,
+		latestSample: 100,
 	}
 	handler := NewWriteHandler(log.NewNopLogger(), appendable)

@@ -78,6 +86,32 @@ func TestOutOfOrder(t *testing.T) {
 	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
 }

+// This test case currently aims to verify that the WriteHandler endpoint
+// don't fail on ingestion errors since the exemplar storage is
+// still experimental.
+func TestOutOfOrderExemplar(t *testing.T) {
+	buf, _, err := buildWriteRequest([]prompb.TimeSeries{{
+		Labels:    []prompb.Label{{Name: "__name__", Value: "test_metric"}},
+		Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: 0}},
+	}}, nil, nil)
+	require.NoError(t, err)
+
+	req, err := http.NewRequest("", "", bytes.NewReader(buf))
+	require.NoError(t, err)
+
+	appendable := &mockAppendable{
+		latestExemplar: 100,
+	}
+	handler := NewWriteHandler(log.NewNopLogger(), appendable)
+
+	recorder := httptest.NewRecorder()
+	handler.ServeHTTP(recorder, req)
+
+	resp := recorder.Result()
+	// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
+	require.Equal(t, http.StatusNoContent, resp.StatusCode)
+}
+
 func TestCommitErr(t *testing.T) {
 	buf, _, err := buildWriteRequest(writeRequestFixture.Timeseries, nil, nil)
 	require.NoError(t, err)
@@ -101,9 +135,11 @@ func TestCommitErr(t *testing.T) {
 }

 type mockAppendable struct {
-	latest    int64
-	samples   []mockSample
-	commitErr error
+	latestSample   int64
+	samples        []mockSample
+	latestExemplar int64
+	exemplars      []mockExemplar
+	commitErr      error
 }

 type mockSample struct {
@@ -112,16 +148,23 @@ type mockSample struct {
 	v float64
 }

+type mockExemplar struct {
+	l  labels.Labels
+	el labels.Labels
+	t  int64
+	v  float64
+}
+
 func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
 	return m
 }

 func (m *mockAppendable) Append(_ uint64, l labels.Labels, t int64, v float64) (uint64, error) {
-	if t < m.latest {
+	if t < m.latestSample {
 		return 0, storage.ErrOutOfOrderSample
 	}

-	m.latest = t
+	m.latestSample = t
 	m.samples = append(m.samples, mockSample{l, t, v})
 	return 0, nil
 }

@@ -134,7 +177,12 @@ func (*mockAppendable) Rollback() error {
 	return fmt.Errorf("not implemented")
 }

-func (*mockAppendable) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
-	// noop until we implement exemplars over remote write
+func (m *mockAppendable) AppendExemplar(_ uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+	if e.Ts < m.latestExemplar {
+		return 0, storage.ErrOutOfOrderExemplar
+	}
+
+	m.latestExemplar = e.Ts
+	m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
 	return 0, nil
 }

@@ -115,6 +115,7 @@ type Expander struct {
 	name    string
 	data    interface{}
 	funcMap text_template.FuncMap
+	options []string
 }

 // NewTemplateExpander returns a template expander ready to use.
@@ -126,7 +127,11 @@ func NewTemplateExpander(
 	timestamp model.Time,
 	queryFunc QueryFunc,
 	externalURL *url.URL,
+	options []string,
 ) *Expander {
+	if options == nil {
+		options = []string{"missingkey=zero"}
+	}
 	return &Expander{
 		text: text,
 		name: name,
@@ -291,6 +296,7 @@ func NewTemplateExpander(
 				return externalURL.String()
 			},
 		},
+		options: options,
 	}
 }

@@ -336,7 +342,9 @@ func (te Expander) Expand() (result string, resultErr error) {

 	templateTextExpansionTotal.Inc()

-	tmpl, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
+	tmpl := text_template.New(te.name).Funcs(te.funcMap)
+	tmpl.Option(te.options...)
+	tmpl, err := tmpl.Parse(te.text)
 	if err != nil {
 		return "", errors.Wrapf(err, "error parsing template %v", te.name)
 	}
@@ -361,7 +369,7 @@ func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr
 	}()

 	tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
-	tmpl.Option("missingkey=zero")
+	tmpl.Option(te.options...)
 	tmpl.Funcs(html_template.FuncMap{
 		"tmpl": func(name string, data interface{}) (html_template.HTML, error) {
 			var buffer bytes.Buffer

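Note (not part of the diff): the options plumbed through above are Go text/template options such as "missingkey=zero" (the previously hard-coded behavior, kept as the default when options is nil) and "missingkey=error". A minimal sketch of overriding the default, assuming queryFunc and externalURL are set up as in the surrounding code:

	// With "missingkey=error", expanding a template that references a missing
	// key fails instead of rendering a zero value.
	expander := NewTemplateExpander(
		context.Background(), "{{ .Foo }}", "example", nil, 0,
		queryFunc, externalURL,
		[]string{"missingkey=error"},
	)
	_, err := expander.Expand() // returns an error: no entry for key "Foo"
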
@@ -31,6 +31,7 @@ func TestTemplateExpansion(t *testing.T) {
 		text        string
 		output      string
 		input       interface{}
+		options     []string
 		queryResult promql.Vector
 		shouldFail  bool
 		html        bool
@@ -153,6 +154,45 @@ func TestTemplateExpansion(t *testing.T) {
 			}},
 			output: "a:11: b:21: ",
 		},
+		{
+			// Missing value is no value for nil options.
+			text:   "{{ .Foo }}",
+			output: "<no value>",
+		},
+		{
+			// Missing value is no value for no options.
+			text:    "{{ .Foo }}",
+			options: make([]string, 0),
+			output:  "<no value>",
+		},
+		{
+			// Assert that missing value returns error with missingkey=error.
+			text:       "{{ .Foo }}",
+			options:    []string{"missingkey=error"},
+			shouldFail: true,
+			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <.Foo>: nil data; no entry for key "Foo"`,
+		},
+		{
+			// Missing value is "" for nil options in ExpandHTML.
+			text:   "{{ .Foo }}",
+			output: "",
+			html:   true,
+		},
+		{
+			// Missing value is "" for no options in ExpandHTML.
+			text:    "{{ .Foo }}",
+			options: make([]string, 0),
+			output:  "",
+			html:    true,
+		},
+		{
+			// Assert that missing value returns error with missingkey=error in ExpandHTML.
+			text:       "{{ .Foo }}",
+			options:    []string{"missingkey=error"},
+			shouldFail: true,
+			errorMsg:   `error executing template test: template: test:1:3: executing "test" at <.Foo>: nil data; no entry for key "Foo"`,
+			html:       true,
+		},
 		{
 			// Unparsable template.
 			text: "{{",
@@ -341,7 +381,7 @@ func TestTemplateExpansion(t *testing.T) {
 		}
 		var result string
 		var err error
-		expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, 0, queryFunc, extURL)
+		expander := NewTemplateExpander(context.Background(), s.text, "test", s.input, 0, queryFunc, extURL, s.options)
 		if s.html {
 			result, err = expander.ExpandHTML(nil)
 		} else {
@@ -356,7 +396,7 @@ func TestTemplateExpansion(t *testing.T) {
 		require.NoError(t, err)

 		if err == nil {
-			require.Equal(t, result, s.output)
+			require.Equal(t, s.output, result)
 		}
 	}
 }

@@ -17,4 +17,6 @@ A series of blog posts explaining different components of TSDB:
 * [WAL and Checkpoint](https://ganeshvernekar.com/blog/prometheus-tsdb-wal-and-checkpoint/)
 * [Memory Mapping of Head Chunks from Disk](https://ganeshvernekar.com/blog/prometheus-tsdb-mmapping-head-chunks-from-disk/)
 * [Persistent Block and its Index](https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/)
-* [Queries](https://ganeshvernekar.com/blog/prometheus-tsdb-queries/)
+* [Queries](https://ganeshvernekar.com/blog/prometheus-tsdb-queries/)
+* [Compaction and Retention](https://ganeshvernekar.com/blog/prometheus-tsdb-compaction-and-retention/)
+* [Snapshot on Shutdown](https://ganeshvernekar.com/blog/prometheus-tsdb-snapshot-on-shutdown/)

@@ -1663,9 +1663,12 @@ func (db *DB) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 			}(b))
 		}
 	}
-	g.Go(func() error {
-		return db.head.Delete(mint, maxt, ms...)
-	})
+	if db.head.OverlapsClosedInterval(mint, maxt) {
+		g.Go(func() error {
+			return db.head.Delete(mint, maxt, ms...)
+		})
+	}

 	return g.Wait()
 }

@@ -736,6 +736,11 @@ func (h *Head) Truncate(mint int64) (err error) {
 	return h.truncateWAL(mint)
 }

+// OverlapsClosedInterval returns true if the head overlaps [mint, maxt].
+func (h *Head) OverlapsClosedInterval(mint, maxt int64) bool {
+	return h.MinTime() <= maxt && mint <= h.MaxTime()
+}
+
 // truncateMemory removes old data before mint from the head.
 func (h *Head) truncateMemory(mint int64) (err error) {
 	h.chunkSnapshotMtx.Lock()
@@ -1101,6 +1106,10 @@ func (h *Head) gc() int64 {
 	// Remove deleted series IDs from the postings lists.
 	h.postings.Delete(deleted)

+	// Remove tombstones referring to the deleted series.
+	h.tombstones.DeleteTombstones(deleted)
+	h.tombstones.TruncateBefore(mint)
+
 	if h.wal != nil {
 		_, last, _ := wal.Segments(h.wal.Dir())
 		h.deletedMtx.Lock()

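Note (not part of the diff): two closed intervals [a, b] and [c, d] overlap exactly when a <= d and c <= b, which is the predicate OverlapsClosedInterval applies to the head's min/max time; DB.Delete uses it above to skip the head when the deletion range cannot touch in-memory data. A standalone sketch of the predicate:

	package main

	import "fmt"

	// overlaps reports whether the closed intervals [aMin, aMax] and [bMin, bMax] intersect.
	func overlaps(aMin, aMax, bMin, bMax int64) bool {
		return aMin <= bMax && bMin <= aMax
	}

	func main() {
		fmt.Println(overlaps(0, 10, 10, 20)) // true: they share the point 10
		fmt.Println(overlaps(0, 10, 11, 20)) // false: disjoint
	}
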
@@ -18,7 +18,6 @@ import (
 	"encoding/binary"
 	"runtime"
 	"sort"
-	"strings"
 	"sync"

 	"github.com/prometheus/prometheus/pkg/labels"
@@ -94,8 +93,8 @@ func (p *MemPostings) SortedKeys() []labels.Label {
 	p.mtx.RUnlock()

 	sort.Slice(keys, func(i, j int) bool {
-		if d := strings.Compare(keys[i].Name, keys[j].Name); d != 0 {
-			return d < 0
+		if keys[i].Name != keys[j].Name {
+			return keys[i].Name < keys[j].Name
 		}
 		return keys[i].Value < keys[j].Value
 	})

@@ -252,6 +252,34 @@ func (t *MemTombstones) Get(ref uint64) (Intervals, error) {
 	return t.intvlGroups[ref], nil
 }

+func (t *MemTombstones) DeleteTombstones(refs map[uint64]struct{}) {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	for ref := range refs {
+		delete(t.intvlGroups, ref)
+	}
+}
+
+func (t *MemTombstones) TruncateBefore(beforeT int64) {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	for ref, ivs := range t.intvlGroups {
+		i := len(ivs) - 1
+		for ; i >= 0; i-- {
+			if beforeT > ivs[i].Maxt {
+				break
+			}
+		}
+		if len(ivs[i+1:]) == 0 {
+			delete(t.intvlGroups, ref)
+		} else {
+			newIvs := make(Intervals, len(ivs[i+1:]))
+			copy(newIvs, ivs[i+1:])
+			t.intvlGroups[ref] = newIvs
+		}
+	}
+}
+
 func (t *MemTombstones) Iter(f func(uint64, Intervals) error) error {
 	t.mtx.RLock()
 	defer t.mtx.RUnlock()

@@ -63,6 +63,66 @@ func TestWriteAndReadbackTombstones(t *testing.T) {
 	require.Equal(t, stones, restr)
 }

+func TestDeletingTombstones(t *testing.T) {
+	stones := NewMemTombstones()
+
+	ref := uint64(42)
+	mint := rand.Int63n(time.Now().UnixNano())
+	dranges := make(Intervals, 0, 1)
+	dranges = dranges.Add(Interval{mint, mint + rand.Int63n(1000)})
+	stones.AddInterval(ref, dranges...)
+	stones.AddInterval(uint64(43), dranges...)
+
+	intervals, err := stones.Get(ref)
+	require.NoError(t, err)
+	require.Equal(t, intervals, dranges)
+
+	stones.DeleteTombstones(map[uint64]struct{}{ref: struct{}{}})
+
+	intervals, err = stones.Get(ref)
+	require.NoError(t, err)
+	require.Empty(t, intervals)
+}
+
+func TestTruncateBefore(t *testing.T) {
+	cases := []struct {
+		before  Intervals
+		beforeT int64
+		after   Intervals
+	}{
+		{
+			before:  Intervals{{1, 2}, {4, 10}, {12, 100}},
+			beforeT: 3,
+			after:   Intervals{{4, 10}, {12, 100}},
+		},
+		{
+			before:  Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
+			beforeT: 900,
+			after:   Intervals{{200, 1000}},
+		},
+		{
+			before:  Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
+			beforeT: 2000,
+			after:   nil,
+		},
+		{
+			before:  Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
+			beforeT: 0,
+			after:   Intervals{{1, 2}, {4, 10}, {12, 100}, {200, 1000}},
+		},
+	}
+	for _, c := range cases {
+		ref := uint64(42)
+		stones := NewMemTombstones()
+		stones.AddInterval(ref, c.before...)
+
+		stones.TruncateBefore(c.beforeT)
+		ts, err := stones.Get(ref)
+		require.NoError(t, err)
+		require.Equal(t, c.after, ts)
+	}
+}
+
 func TestAddingNewIntervals(t *testing.T) {
 	cases := []struct {
 		exist Intervals

@@ -21,6 +21,8 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )

 var (
@@ -60,22 +62,14 @@ func TestCompressionHandler_PlainText(t *testing.T) {
 	}

 	resp, err := client.Get(server.URL + "/foo_endpoint")
-
-	if err != nil {
-		t.Error("client get failed with unexpected error")
-	}
+	require.NoError(t, err, "client get failed with unexpected error")
 	defer resp.Body.Close()
 	contents, err := ioutil.ReadAll(resp.Body)
-
-	if err != nil {
-		t.Errorf("unexpected error while reading the response body: %s", err.Error())
-	}
+	require.NoError(t, err, "unexpected error while creating the response body reader")

 	expected := "Hello World!"
 	actual := string(contents)
-	if expected != actual {
-		t.Errorf("expected response with content %s, but got %s", expected, actual)
-	}
+	require.Equal(t, expected, actual, "expected response with content")
 }

 func TestCompressionHandler_Gzip(t *testing.T) {
@@ -95,34 +89,22 @@ func TestCompressionHandler_Gzip(t *testing.T) {
 	req.Header.Set(acceptEncodingHeader, gzipEncoding)

 	resp, err := client.Do(req)
-	if err != nil {
-		t.Error("client get failed with unexpected error")
-	}
+	require.NoError(t, err, "client get failed with unexpected error")
 	defer resp.Body.Close()

-	if err != nil {
-		t.Errorf("unexpected error while reading the response body: %s", err.Error())
-	}
-
 	actualHeader := resp.Header.Get(contentEncodingHeader)
-
-	if actualHeader != gzipEncoding {
-		t.Errorf("expected response with encoding header %s, but got %s", gzipEncoding, actualHeader)
-	}
+	require.Equal(t, gzipEncoding, actualHeader, "unexpected encoding header in response")

 	var buf bytes.Buffer
-	zr, _ := gzip.NewReader(resp.Body)
+	zr, err := gzip.NewReader(resp.Body)
+	require.NoError(t, err, "unexpected error while creating the response body reader")

 	_, err = buf.ReadFrom(zr)
-	if err != nil {
-		t.Error("unexpected error while reading from response body")
-	}
+	require.NoError(t, err, "unexpected error while reading the response body")

 	actual := buf.String()
 	expected := "Hello World!"
-	if expected != actual {
-		t.Errorf("expected response with content %s, but got %s", expected, actual)
-	}
+	require.Equal(t, expected, actual, "unexpected response content")
 }

 func TestCompressionHandler_Deflate(t *testing.T) {
@@ -142,35 +124,20 @@ func TestCompressionHandler_Deflate(t *testing.T) {
 	req.Header.Set(acceptEncodingHeader, deflateEncoding)

 	resp, err := client.Do(req)
-	if err != nil {
-		t.Error("client get failed with unexpected error")
-	}
+	require.NoError(t, err, "client get failed with unexpected error")
 	defer resp.Body.Close()

-	if err != nil {
-		t.Errorf("unexpected error while reading the response body: %s", err.Error())
-	}
-
 	actualHeader := resp.Header.Get(contentEncodingHeader)
-
-	if actualHeader != deflateEncoding {
-		t.Errorf("expected response with encoding header %s, but got %s", deflateEncoding, actualHeader)
-	}
+	require.Equal(t, deflateEncoding, actualHeader, "expected response with encoding header")

 	var buf bytes.Buffer
 	dr, err := zlib.NewReader(resp.Body)
-	if err != nil {
-		t.Error("unexpected error while reading from response body")
-	}
+	require.NoError(t, err, "unexpected error while creating the response body reader")

 	_, err = buf.ReadFrom(dr)
-	if err != nil {
-		t.Error("unexpected error while reading from response body")
-	}
+	require.NoError(t, err, "unexpected error while reading the response body")

 	actual := buf.String()
 	expected := "Hello World!"
-	if expected != actual {
-		t.Errorf("expected response with content %s, but got %s", expected, actual)
-	}
+	require.Equal(t, expected, actual, "expected response with content")
 }

@@ -17,6 +17,8 @@ import (
 	"net/http"
 	"regexp"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )

 func getCORSHandlerFunc() http.Handler {
@@ -40,41 +42,24 @@ func TestCORSHandler(t *testing.T) {

 	// OPTIONS with legit origin
 	req, err := http.NewRequest("OPTIONS", server.URL+"/any_path", nil)
-
-	if err != nil {
-		t.Error("could not create request")
-	}
+	require.NoError(t, err, "could not create request")

 	req.Header.Set("Origin", dummyOrigin)
 	resp, err := client.Do(req)
-
-	if err != nil {
-		t.Error("client get failed with unexpected error")
-	}
+	require.NoError(t, err, "client get failed with unexpected error")

 	AccessControlAllowOrigin := resp.Header.Get("Access-Control-Allow-Origin")
-
-	if AccessControlAllowOrigin != dummyOrigin {
-		t.Fatalf("%q does not match %q", dummyOrigin, AccessControlAllowOrigin)
-	}
+	require.Equal(t, dummyOrigin, AccessControlAllowOrigin, "expected Access-Control-Allow-Origin header")

 	// OPTIONS with bad origin
 	req, err = http.NewRequest("OPTIONS", server.URL+"/any_path", nil)
-
-	if err != nil {
-		t.Error("could not create request")
-	}
+	require.NoError(t, err, "could not create request")

 	req.Header.Set("Origin", "https://not-foo.com")
 	resp, err = client.Do(req)
-
-	if err != nil {
-		t.Error("client get failed with unexpected error")
-	}
+	require.NoError(t, err, "client get failed with unexpected error")

 	AccessControlAllowOrigin = resp.Header.Get("Access-Control-Allow-Origin")
-
-	if AccessControlAllowOrigin != "" {
-		t.Fatalf("Access-Control-Allow-Origin should not exist but it was set to: %q", AccessControlAllowOrigin)
-	}
+	require.Empty(t, AccessControlAllowOrigin, "Access-Control-Allow-Origin header should not exist but it was set")
 }

@@ -20,6 +20,7 @@ import (
 	"time"

 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/util/testutil"
 )
@@ -27,19 +28,17 @@ import (
 func TestTimerGroupNewTimer(t *testing.T) {
 	tg := NewTimerGroup()
 	timer := tg.GetTimer(ExecTotalTime)
-	if duration := timer.Duration(); duration != 0 {
-		t.Fatalf("Expected duration of 0, but it was %f instead.", duration)
-	}
+	duration := timer.Duration()
+	require.Equal(t, 0.0, duration, "Expected duration equal 0")
 	minimum := 2 * time.Millisecond
 	timer.Start()
 	time.Sleep(minimum)
 	timer.Stop()
-	if duration := timer.Duration(); duration == 0 {
-		t.Fatalf("Expected duration greater than 0, but it was %f instead.", duration)
-	}
-	if elapsed := timer.ElapsedTime(); elapsed < minimum {
-		t.Fatalf("Expected elapsed time to be greater than time slept, elapsed was %d, and time slept was %d.", elapsed.Nanoseconds(), minimum)
-	}
+	duration = timer.Duration()
+	require.Greater(t, duration, 0.0, "Expected duration greater than 0")
+	elapsed := timer.ElapsedTime()
+	require.GreaterOrEqual(t, elapsed, minimum,
+		"Expected elapsed time to be greater than time slept.")
 }

 func TestQueryStatsWithTimers(t *testing.T) {
@@ -51,17 +50,11 @@ func TestQueryStatsWithTimers(t *testing.T) {

 	qs := NewQueryStats(qt)
 	actual, err := json.Marshal(qs)
-	if err != nil {
-		t.Fatalf("Unexpected error during serialization: %v", err)
-	}
+	require.NoError(t, err, "unexpected error during serialization")
 	// Timing value is one of multiple fields, unit is seconds (float).
 	match, err := regexp.MatchString(`[,{]"execTotalTime":\d+\.\d+[,}]`, string(actual))
-	if err != nil {
-		t.Fatalf("Unexpected error while matching string: %v", err)
-	}
-	if !match {
-		t.Fatalf("Expected timings with one non-zero entry, but got %s.", actual)
-	}
+	require.NoError(t, err, "unexpected error while matching string")
+	require.True(t, match, "Expected timings with one non-zero entry.")
 }

 func TestQueryStatsWithSpanTimers(t *testing.T) {
@@ -72,51 +65,28 @@ func TestQueryStatsWithSpanTimers(t *testing.T) {
 	qst.Finish()
 	qs := NewQueryStats(qt)
 	actual, err := json.Marshal(qs)
-	if err != nil {
-		t.Fatalf("Unexpected error during serialization: %v", err)
-	}
+	require.NoError(t, err, "unexpected error during serialization")
 	// Timing value is one of multiple fields, unit is seconds (float).
 	match, err := regexp.MatchString(`[,{]"execQueueTime":\d+\.\d+[,}]`, string(actual))
-	if err != nil {
-		t.Fatalf("Unexpected error while matching string: %v", err)
-	}
-	if !match {
-		t.Fatalf("Expected timings with one non-zero entry, but got %s.", actual)
-	}
+	require.NoError(t, err, "unexpected error while matching string")
+	require.True(t, match, "Expected timings with one non-zero entry.")
 }

 func TestTimerGroup(t *testing.T) {
 	tg := NewTimerGroup()
-	execTotalTimer := tg.GetTimer(ExecTotalTime)
-	if tg.GetTimer(ExecTotalTime).String() != "Exec total time: 0s" {
-		t.Fatalf("Expected string %s, but got %s", "", execTotalTimer.String())
-	}
-	execQueueTimer := tg.GetTimer(ExecQueueTime)
-	if tg.GetTimer(ExecQueueTime).String() != "Exec queue wait time: 0s" {
-		t.Fatalf("Expected string %s, but got %s", "", execQueueTimer.String())
-	}
-	innerEvalTimer := tg.GetTimer(InnerEvalTime)
-	if tg.GetTimer(InnerEvalTime).String() != "Inner eval time: 0s" {
-		t.Fatalf("Expected string %s, but got %s", "", innerEvalTimer.String())
-	}
-	queryPreparationTimer := tg.GetTimer(QueryPreparationTime)
-	if tg.GetTimer(QueryPreparationTime).String() != "Query preparation time: 0s" {
-		t.Fatalf("Expected string %s, but got %s", "", queryPreparationTimer.String())
-	}
-	resultSortTimer := tg.GetTimer(ResultSortTime)
-	if tg.GetTimer(ResultSortTime).String() != "Result sorting time: 0s" {
-		t.Fatalf("Expected string %s, but got %s", "", resultSortTimer.String())
-	}
-	evalTotalTimer := tg.GetTimer(EvalTotalTime)
-	if tg.GetTimer(EvalTotalTime).String() != "Eval total time: 0s" {
-		t.Fatalf("Expected string %s, but got %s", "", evalTotalTimer.String())
-	}
+	require.Equal(t, "Exec total time: 0s", tg.GetTimer(ExecTotalTime).String())
+
+	require.Equal(t, "Exec queue wait time: 0s", tg.GetTimer(ExecQueueTime).String())
+
+	require.Equal(t, "Inner eval time: 0s", tg.GetTimer(InnerEvalTime).String())
+
+	require.Equal(t, "Query preparation time: 0s", tg.GetTimer(QueryPreparationTime).String())
+
+	require.Equal(t, "Result sorting time: 0s", tg.GetTimer(ResultSortTime).String())
+
+	require.Equal(t, "Eval total time: 0s", tg.GetTimer(EvalTotalTime).String())
+
 	actual := tg.String()
 	expected := "Exec total time: 0s\nExec queue wait time: 0s\nInner eval time: 0s\nQuery preparation time: 0s\nResult sorting time: 0s\nEval total time: 0s\n"
-
-	if actual != expected {
-		t.Fatalf("Expected timerGroup string %s, but got %s.", expected, actual)
-	}
+	require.Equal(t, expected, actual)
 }