diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2f1f260c8..5ec210d76 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -11,155 +11,158 @@ executors:
   # should also be updated.
   golang:
     docker:
-    - image: circleci/golang:1.16-node
+      - image: circleci/golang:1.16-node
   golang_115:
     docker:
-    - image: circleci/golang:1.15-node
+      - image: circleci/golang:1.15-node
 
 jobs:
   test:
     executor: golang
     steps:
-    - prometheus/setup_environment
-    - go/load-cache:
-        key: v1
-    - restore_cache:
-        keys:
-        - v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
-        - v3-npm-deps-
-    - run:
-        command: make
-        environment:
-          # Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
-          GOGC: "20"
-          # By default Go uses GOMAXPROCS but a Circle CI executor has many
-          # cores (> 30) while the CPU and RAM resources are throttled. If we
-          # don't limit this to the number of allocated cores, the job is
-          # likely to get OOMed and killed.
-          GOOPTS: "-p 2"
-          GOMAXPROCS: "2"
-          GO111MODULE: "on"
-    - prometheus/check_proto:
-        version: "3.15.8"
-    - prometheus/store_artifact:
-        file: prometheus
-    - prometheus/store_artifact:
-        file: promtool
-    - go/save-cache:
-        key: v1
-    - save_cache:
-        key: v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
-        paths:
-        - /home/circleci/.cache/yarn
-    - store_test_results:
-        path: test-results
+      - prometheus/setup_environment
+      - go/load-cache:
+          key: v1
+      - restore_cache:
+          keys:
+            - v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
+            - v3-npm-deps-
+      - run:
+          command: sudo apt-get install -y yamllint
+      - run:
+          command: make
+          environment:
+            # Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
+            GOGC: "20"
+            # By default Go uses GOMAXPROCS but a Circle CI executor has many
+            # cores (> 30) while the CPU and RAM resources are throttled. If we
+            # don't limit this to the number of allocated cores, the job is
+            # likely to get OOMed and killed.
+            GOOPTS: "-p 2"
+            GOMAXPROCS: "2"
+            GO111MODULE: "on"
+      - prometheus/check_proto:
+          version: "3.15.8"
+      - prometheus/store_artifact:
+          file: prometheus
+      - prometheus/store_artifact:
+          file: promtool
+      - go/save-cache:
+          key: v1
+      - save_cache:
+          key: v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
+          paths:
+            - /home/circleci/.cache/yarn
+      - store_test_results:
+          path: test-results
 
   test_windows:
-    executor: 
+    executor:
       name: win/default
       shell: powershell
     working_directory: /go/src/github.com/prometheus/prometheus
     steps:
-    - checkout
-    - run:
-        # Temporary workaround until circleci updates go.
-        command: |
-          choco upgrade -y golang
-    - run:
-        command:
-          refreshenv
-    - run:
-        command: |
-          $env:GOARCH=""; $env:GOOS=""; cd web/ui; go generate
-          cd ../..
-          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
-          go test $TestTargets -vet=off -v
-        environment:
-          GOGC: "20"
-          GOOPTS: "-p 2"
+      - checkout
+      - run:
+          # Temporary workaround until circleci updates go.
+          command: |
+            choco upgrade -y golang
+      - run:
+          command: refreshenv
+      - run:
+          command: |
+            $env:GOARCH=""; $env:GOOS=""; cd web/ui; go generate
+            cd ../..
+            $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
+            go test $TestTargets -vet=off -v
+          environment:
+            GOGC: "20"
+            GOOPTS: "-p 2"
 
   test_tsdb_go115:
     executor: golang_115
     steps:
-    - checkout
-    - run: go test ./tsdb/...
+      - checkout
+      - run: go test ./tsdb/...
 
   test_mixins:
     executor: golang
     steps:
-    - checkout
-    - run: go install ./cmd/promtool/.
-    - run:
-        command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
-        working_directory: ~/project/documentation/prometheus-mixin
-    - run:
-        command: make clean
-        working_directory: ~/project/documentation/prometheus-mixin
-    - run:
-        command: jb install
-        working_directory: ~/project/documentation/prometheus-mixin
-    - run:
-        command: make
-        working_directory: ~/project/documentation/prometheus-mixin
-    - run:
-        command: git diff --exit-code
-        working_directory: ~/project/documentation/prometheus-mixin
+      - checkout
+      - run: go install ./cmd/promtool/.
+      - run:
+          command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
+          working_directory: ~/project/documentation/prometheus-mixin
+      - run:
+          command: make clean
+          working_directory: ~/project/documentation/prometheus-mixin
+      - run:
+          command: jb install
+          working_directory: ~/project/documentation/prometheus-mixin
+      - run:
+          command: sudo apt-get install -y yamllint
+      - run:
+          command: make
+          working_directory: ~/project/documentation/prometheus-mixin
+      - run:
+          command: git diff --exit-code
+          working_directory: ~/project/documentation/prometheus-mixin
 
   repo_sync:
     executor: golang
     steps:
-    - checkout
-    - run: mkdir -v -p "${PATH%%:*}" && curl -sL --fail https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_amd64 -o "${PATH%%:*}/yq" && chmod -v +x "${PATH%%:*}/yq"
-    - run: sha256sum -c <(echo "c4343783c3361495c0d6d1eb742bba7432aa65e13e9fb8d7e201d544bcf14246 ${PATH%%:*}/yq")
-    - run: ./scripts/sync_repo_files.sh
+      - checkout
+      - run: mkdir -v -p "${PATH%%:*}" && curl -sL --fail https://github.com/mikefarah/yq/releases/download/v4.6.3/yq_linux_amd64 -o "${PATH%%:*}/yq" && chmod -v +x "${PATH%%:*}/yq"
+      - run: sha256sum -c <(echo "c4343783c3361495c0d6d1eb742bba7432aa65e13e9fb8d7e201d544bcf14246 ${PATH%%:*}/yq")
+      - run: ./scripts/sync_repo_files.sh
 
 workflows:
   version: 2
   prometheus:
     jobs:
-    - test:
-        filters:
-          tags:
-            only: /.*/
-    - test_tsdb_go115:
-        filters:
-          tags:
-            only: /.*/
-    - test_mixins:
-        filters:
-          tags:
-            only: /.*/
-    - test_windows:
-        filters:
-          tags:
-            only: /.*/
-    - prometheus/build:
-        name: build
-        parallelism: 12
-        filters:
-          tags:
-            only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
-    - prometheus/publish_main:
-        context: org-context
-        requires:
-        - test
-        - build
-        filters:
-          branches:
-            only: main
-        image: circleci/golang:1-node
-    - prometheus/publish_release:
-        context: org-context
-        requires:
-        - test
-        - build
-        filters:
-          tags:
-            only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
-          branches:
-            ignore: /.*/
-        image: circleci/golang:1-node
+      - test:
+          filters:
+            tags:
+              only: /.*/
+      - test_tsdb_go115:
+          filters:
+            tags:
+              only: /.*/
+      - test_mixins:
+          filters:
+            tags:
+              only: /.*/
+      - test_windows:
+          filters:
+            tags:
+              only: /.*/
+      - prometheus/build:
+          name: build
+          parallelism: 12
+          filters:
+            tags:
+              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+      - prometheus/publish_main:
+          context: org-context
+          requires:
+            - test
+            - build
+          filters:
+            branches:
+              only: main
+          image: circleci/golang:1-node
+      - prometheus/publish_release:
+          context: org-context
+          requires:
+            - test
+            - build
+          filters:
+            tags:
+              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+            branches:
+              ignore: /.*/
+          image: circleci/golang:1-node
   nightly:
     triggers:
       - schedule:
@@ -169,5 +172,5 @@ workflows:
             only:
               - main
     jobs:
-    - repo_sync:
-        context: org-context
+      - repo_sync:
+          context: org-context
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 3f1337d92..fb8c96aba 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -13,12 +13,12 @@ name: "CodeQL"
 
 on:
   push:
-    branches: [ main, release-* ]
+    branches: [main, release-*]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [ main ]
+    branches: [main]
   schedule:
-    - cron: '26 14 * * 1'
+    - cron: "26 14 * * 1"
 
 jobs:
   analyze:
@@ -28,40 +28,40 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: [ 'go', 'javascript' ]
+        language: ["go", "javascript"]
         # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
         # Learn more:
         # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
 
     steps:
-    - name: Checkout repository
-      uses: actions/checkout@v2
+      - name: Checkout repository
+        uses: actions/checkout@v2
 
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+      # Initializes the CodeQL tools for scanning.
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v1
+        with:
+          languages: ${{ matrix.language }}
+          # If you wish to specify custom queries, you can do so here or in a config file.
+          # By default, queries listed here will override any specified in a config file.
+          # Prefix the list here with "+" to use these queries and those in the config file.
+          # queries: ./path/to/local/query, your-org/your-repo/queries@main
 
-    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v1
+      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+      # If this step fails, then you should remove it and run the build manually (see below)
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v1
 
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 https://git.io/JvXDl
+      # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language - #- run: | - # make bootstrap - # make release + #- run: | + # make bootstrap + # make release - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/funcbench.yml b/.github/workflows/funcbench.yml index 811b3bf2a..6583aa95b 100644 --- a/.github/workflows/funcbench.yml +++ b/.github/workflows/funcbench.yml @@ -22,37 +22,37 @@ jobs: PROVIDER: gke ZONE: europe-west3-a steps: - - name: Update status to pending - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Prepare nodepool - uses: docker://prominfra/funcbench:master - with: - entrypoint: 'docker_entrypoint' - args: make deploy - - name: Delete all resources - if: always() - uses: docker://prominfra/funcbench:master - with: - entrypoint: 'docker_entrypoint' - args: make clean - - name: Update status to failure - if: failure() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Update status to success - if: success() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to pending + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Prepare nodepool + uses: docker://prominfra/funcbench:master + with: + entrypoint: "docker_entrypoint" + args: make deploy + - name: Delete all resources + if: always() + uses: docker://prominfra/funcbench:master + with: + entrypoint: "docker_entrypoint" + args: make clean + - name: Update status to failure + if: failure() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to success + if: success() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml 
index bed4d7699..eb4a76d54 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -2,28 +2,28 @@ name: CIFuzz on: pull_request: paths: - - 'go.sum' - - 'go.mod' - - '**.go' + - "go.sum" + - "go.mod" + - "**.go" jobs: - Fuzzing: - runs-on: ubuntu-latest - steps: - - name: Build Fuzzers - id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master - with: - oss-fuzz-project-name: 'prometheus' - dry-run: false - - name: Run Fuzzers - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master - with: - oss-fuzz-project-name: 'prometheus' - fuzz-seconds: 600 - dry-run: false - - name: Upload Crash - uses: actions/upload-artifact@v1 - if: failure() && steps.build.outcome == 'success' - with: - name: artifacts - path: ./out/artifacts + Fuzzing: + runs-on: ubuntu-latest + steps: + - name: Build Fuzzers + id: build + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + with: + oss-fuzz-project-name: "prometheus" + dry-run: false + - name: Run Fuzzers + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + with: + oss-fuzz-project-name: "prometheus" + fuzz-seconds: 600 + dry-run: false + - name: Upload Crash + uses: actions/upload-artifact@v1 + if: failure() && steps.build.outcome == 'success' + with: + name: artifacts + path: ./out/artifacts diff --git a/.github/workflows/prombench.yml b/.github/workflows/prombench.yml index d7c62bd31..6ee172662 100644 --- a/.github/workflows/prombench.yml +++ b/.github/workflows/prombench.yml @@ -1,6 +1,6 @@ on: repository_dispatch: - types: [prombench_start,prombench_restart,prombench_stop] + types: [prombench_start, prombench_restart, prombench_stop] name: Prombench Workflow env: AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }} @@ -22,105 +22,105 @@ jobs: if: github.event.action == 'prombench_start' runs-on: ubuntu-latest steps: - - name: Update status to pending - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"pending", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Run make deploy to start test - id: make_deploy - uses: docker://prominfra/prombench:master - with: - args: >- - until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done; - make deploy; - - name: Update status to failure - if: failure() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"failure", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Update status to success - if: success() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"success", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to pending + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"pending", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Run make deploy to start test + id: make_deploy + uses: 
docker://prominfra/prombench:master + with: + args: >- + until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done; + make deploy; + - name: Update status to failure + if: failure() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"failure", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to success + if: success() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"success", "context": "prombench-status-update-start", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" benchmark_cancel: name: Benchmark Cancel if: github.event.action == 'prombench_stop' runs-on: ubuntu-latest steps: - - name: Update status to pending - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"pending", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Run make clean to stop test - id: make_clean - uses: docker://prominfra/prombench:master - with: - args: >- - until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done; - make clean; - - name: Update status to failure - if: failure() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"failure", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Update status to success - if: success() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"success", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to pending + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"pending", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Run make clean to stop test + id: make_clean + uses: docker://prominfra/prombench:master + with: + args: >- + until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done; + make clean; + - name: Update status to failure + if: failure() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"failure", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to success + if: success() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"success", "context": "prombench-status-update-cancel", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + 
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" benchmark_restart: name: Benchmark Restart if: github.event.action == 'prombench_restart' runs-on: ubuntu-latest steps: - - name: Update status to pending - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"pending", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Run make clean then make deploy to restart test - id: make_restart - uses: docker://prominfra/prombench:master - with: - args: >- - until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done; - make clean; - until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done; - make deploy; - - name: Update status to failure - if: failure() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"failure", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" - - name: Update status to success - if: success() - run: >- - curl -i -X POST - -H "Authorization: Bearer $GITHUB_TOKEN" - -H "Content-Type: application/json" - --data '{"state":"success", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' - "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to pending + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"pending", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Run make clean then make deploy to restart test + id: make_restart + uses: docker://prominfra/prombench:master + with: + args: >- + until make all_nodes_running; do echo "waiting for nodepools to be created"; sleep 10; done; + make clean; + until make all_nodes_deleted; do echo "waiting for nodepools to be deleted"; sleep 10; done; + make deploy; + - name: Update status to failure + if: failure() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"failure", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" + - name: Update status to success + if: success() + run: >- + curl -i -X POST + -H "Authorization: Bearer $GITHUB_TOKEN" + -H "Content-Type: application/json" + --data '{"state":"success", "context": "prombench-status-update-restart", "target_url": "'$GITHUB_STATUS_TARGET_URL'"}' + "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA" diff --git a/.gitpod.yml b/.gitpod.yml index 8585b4a25..563f35223 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -1,5 +1,6 @@ +--- tasks: - - init: + - init: make build command: | gp sync-done build diff --git a/.golangci.yml b/.golangci.yml index 3aa13cb5c..642cf45c8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,14 +3,14 @@ run: linters: enable: - - depguard - - golint + - depguard + - golint issues: exclude-rules: - - path: _test.go - linters: - - errcheck + - path: _test.go + linters: + - errcheck 
linters-settings: depguard: diff --git a/.yamllint b/.yamllint new file mode 100644 index 000000000..3b83e9d49 --- /dev/null +++ b/.yamllint @@ -0,0 +1,26 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + commas: disable + comments: disable + comments-indentation: disable + document-start: disable + indentation: + spaces: consistent + key-duplicates: + ignore: | + config/testdata/section_key_dup.bad.yml + line-length: disable + truthy: + ignore: | + .github/workflows/codeql-analysis.yml + .github/workflows/funcbench.yml + .github/workflows/fuzzing.yml + .github/workflows/prombench.yml diff --git a/Makefile b/Makefile index d67245637..a940b1f91 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ TSDB_BENCHMARK_NUM_METRICS ?= 1000 TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout -GOLANGCI_LINT_OPTS ?= --timeout 2m +GOLANGCI_LINT_OPTS ?= --timeout 4m include Makefile.common diff --git a/Makefile.common b/Makefile.common index ce80d530a..bbdec8ef5 100644 --- a/Makefile.common +++ b/Makefile.common @@ -118,7 +118,7 @@ endif %: common-% ; .PHONY: common-all -common-all: precheck style check_license lint unused build test +common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @@ -198,6 +198,11 @@ else endif endif +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" + yamllint . + # For backward-compatibility. .PHONY: common-staticcheck common-staticcheck: lint diff --git a/cmd/promtool/testdata/bad-promql.yml b/cmd/promtool/testdata/bad-promql.yml index 9be8c78b3..39c928998 100644 --- a/cmd/promtool/testdata/bad-promql.yml +++ b/cmd/promtool/testdata/bad-promql.yml @@ -8,5 +8,5 @@ tests: values: 3 promql_expr_test: - # This PromQL generates an error. - - expr: "join_1 + on(a) join_2" + # This PromQL generates an error. + - expr: "join_1 + on(a) join_2" diff --git a/cmd/promtool/testdata/unittest.yml b/cmd/promtool/testdata/unittest.yml index d6221048d..e6745aadf 100644 --- a/cmd/promtool/testdata/unittest.yml +++ b/cmd/promtool/testdata/unittest.yml @@ -8,13 +8,13 @@ tests: - interval: 1m input_series: - series: test_full - values: '0 0' + values: "0 0" - series: test_stale - values: '0 stale' + values: "0 stale" - series: test_missing - values: '0 _ _ _ _ _ _ 0' + values: "0 _ _ _ _ _ _ 0" promql_expr_test: # Ensure the sample is evaluated at the time we expect it to be. @@ -36,7 +36,7 @@ tests: eval_time: 59s exp_samples: - value: 0 - labels: 'test_stale' + labels: "test_stale" - expr: test_stale eval_time: 1m exp_samples: [] @@ -83,10 +83,10 @@ tests: - expr: count(ALERTS) by (alertname, alertstate) eval_time: 4m exp_samples: - - labels: '{alertname="AlwaysFiring",alertstate="firing"}' - value: 1 - - labels: '{alertname="InstanceDown",alertstate="pending"}' - value: 1 + - labels: '{alertname="AlwaysFiring",alertstate="firing"}' + value: 1 + - labels: '{alertname="InstanceDown",alertstate="pending"}' + value: 1 alert_rule_test: - eval_time: 1d @@ -120,7 +120,7 @@ tests: - series: 'test{job="test", instance="x:0"}' # 2 minutes + 1 second of input data, recording rules should only run # once a minute. 
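(Aside on the lint wiring just above: the new `.yamllint` file and the `common-yamllint` target in Makefile.common are what the added `sudo apt-get install -y yamllint` CI steps feed into. A minimal sketch of reproducing that CI step locally, assuming yamllint is available via apt as in this patch; `pip install yamllint` works as well:

    # Install the linter the same way the updated CircleCI test job does.
    sudo apt-get install -y yamllint
    # Run the new Makefile.common target; since yamllint automatically picks up
    # the .yamllint file in the working directory, this is equivalent to
    # running `yamllint .` at the repository root.
    make common-yamllint

The patch continues below.)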
- values: '0+1x120' + values: "0+1x120" promql_expr_test: - expr: job:test:count_over_time1m diff --git a/config/testdata/azure_authentication_method.bad.yml b/config/testdata/azure_authentication_method.bad.yml index b05fc474a..27d980835 100644 --- a/config/testdata/azure_authentication_method.bad.yml +++ b/config/testdata/azure_authentication_method.bad.yml @@ -1,4 +1,4 @@ scrape_configs: -- azure_sd_configs: - - authentication_method: invalid - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + - azure_sd_configs: + - authentication_method: invalid + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 diff --git a/config/testdata/azure_client_id_missing.bad.yml b/config/testdata/azure_client_id_missing.bad.yml index f8da2ff9c..ffd41ae58 100644 --- a/config/testdata/azure_client_id_missing.bad.yml +++ b/config/testdata/azure_client_id_missing.bad.yml @@ -4,4 +4,4 @@ scrape_configs: - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 client_id: - client_secret: mysecret \ No newline at end of file + client_secret: mysecret diff --git a/config/testdata/azure_client_secret_missing.bad.yml b/config/testdata/azure_client_secret_missing.bad.yml index 1295c8ad5..d9e956d7f 100644 --- a/config/testdata/azure_client_secret_missing.bad.yml +++ b/config/testdata/azure_client_secret_missing.bad.yml @@ -4,4 +4,4 @@ scrape_configs: - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: \ No newline at end of file + client_secret: diff --git a/config/testdata/azure_subscription_id_missing.bad.yml b/config/testdata/azure_subscription_id_missing.bad.yml index 997613882..98eadb6de 100644 --- a/config/testdata/azure_subscription_id_missing.bad.yml +++ b/config/testdata/azure_subscription_id_missing.bad.yml @@ -4,4 +4,4 @@ scrape_configs: - subscription_id: tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret \ No newline at end of file + client_secret: mysecret diff --git a/config/testdata/azure_tenant_id_missing.bad.yml b/config/testdata/azure_tenant_id_missing.bad.yml index ac714d9b5..c8eaf55c1 100644 --- a/config/testdata/azure_tenant_id_missing.bad.yml +++ b/config/testdata/azure_tenant_id_missing.bad.yml @@ -4,4 +4,4 @@ scrape_configs: - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 tenant_id: client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret \ No newline at end of file + client_secret: mysecret diff --git a/config/testdata/bearertoken.bad.yml b/config/testdata/bearertoken.bad.yml index 58efc2395..86c2fd5f7 100644 --- a/config/testdata/bearertoken.bad.yml +++ b/config/testdata/bearertoken.bad.yml @@ -3,4 +3,3 @@ scrape_configs: bearer_token: 1234 bearer_token_file: somefile - diff --git a/config/testdata/bearertoken_basicauth.bad.yml b/config/testdata/bearertoken_basicauth.bad.yml index 2584f7fe7..fd2b70ec5 100644 --- a/config/testdata/bearertoken_basicauth.bad.yml +++ b/config/testdata/bearertoken_basicauth.bad.yml @@ -5,4 +5,3 @@ scrape_configs: basic_auth: username: user password: password - diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 61c3070ac..a72031de3 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -1,24 +1,24 @@ # my global config global: - scrape_interval: 15s + scrape_interval: 15s evaluation_interval: 30s # scrape_timeout is set to the global default 
(10s). external_labels: monitor: codelab - foo: bar + foo: bar rule_files: -- "first.rules" -- "my/*.rules" + - "first.rules" + - "my/*.rules" remote_write: - url: http://remote1/push name: drop_expensive write_relabel_configs: - - source_labels: [__name__] - regex: expensive.* - action: drop + - source_labels: [__name__] + regex: expensive.* + action: drop oauth2: client_id: "123" client_secret: "456" @@ -46,300 +46,298 @@ remote_read: key_file: valid_key_file scrape_configs: -- job_name: prometheus + - job_name: prometheus - honor_labels: true - # scrape_interval is defined by the configured global (15s). - # scrape_timeout is defined by the global default (10s). + honor_labels: true + # scrape_interval is defined by the configured global (15s). + # scrape_timeout is defined by the global default (10s). - # metrics_path defaults to '/metrics' - # scheme defaults to 'http'. + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. - file_sd_configs: - - files: - - foo/*.slow.json - - foo/*.slow.yml - - single/file.yml - refresh_interval: 10m - - files: - - bar/*.yaml + file_sd_configs: + - files: + - foo/*.slow.json + - foo/*.slow.yml + - single/file.yml + refresh_interval: 10m + - files: + - bar/*.yaml - static_configs: - - targets: ['localhost:9090', 'localhost:9191'] - labels: - my: label - your: label + static_configs: + - targets: ["localhost:9090", "localhost:9191"] + labels: + my: label + your: label - relabel_configs: - - source_labels: [job, __meta_dns_name] - regex: (.*)some-[regex] - target_label: job - replacement: foo-${1} - # action defaults to 'replace' - - source_labels: [abc] - target_label: cde - - replacement: static - target_label: abc - - regex: - replacement: static - target_label: abc + relabel_configs: + - source_labels: [job, __meta_dns_name] + regex: (.*)some-[regex] + target_label: job + replacement: foo-${1} + # action defaults to 'replace' + - source_labels: [abc] + target_label: cde + - replacement: static + target_label: abc + - regex: + replacement: static + target_label: abc - authorization: - credentials_file: valid_token_file + authorization: + credentials_file: valid_token_file - -- job_name: service-x - - basic_auth: - username: admin_name - password: "multiline\nmysecret\ntest" - - scrape_interval: 50s - scrape_timeout: 5s - - body_size_limit: 10MB - sample_limit: 1000 - - metrics_path: /my_path - scheme: https - - dns_sd_configs: - - refresh_interval: 15s - names: - - first.dns.address.domain.com - - second.dns.address.domain.com - - names: - - first.dns.address.domain.com - # refresh_interval defaults to 30s. 
- - relabel_configs: - - source_labels: [job] - regex: (.*)some-[regex] - action: drop - - source_labels: [__address__] - modulus: 8 - target_label: __tmp_hash - action: hashmod - - source_labels: [__tmp_hash] - regex: 1 - action: keep - - action: labelmap - regex: 1 - - action: labeldrop - regex: d - - action: labelkeep - regex: k - - metric_relabel_configs: - - source_labels: [__name__] - regex: expensive_metric.* - action: drop - -- job_name: service-y - - consul_sd_configs: - - server: 'localhost:1234' - token: mysecret - services: ['nginx', 'cache', 'mysql'] - tags: ["canary", "v1"] - node_meta: - rack: "123" - allow_stale: true - scheme: https - tls_config: - ca_file: valid_ca_file - cert_file: valid_cert_file - key_file: valid_key_file - insecure_skip_verify: false - - relabel_configs: - - source_labels: [__meta_sd_consul_tags] - separator: ',' - regex: label:([^=]+)=([^,]+) - target_label: ${1} - replacement: ${2} - -- job_name: service-z - - tls_config: - cert_file: valid_cert_file - key_file: valid_key_file - - authorization: - credentials: mysecret - -- job_name: service-kubernetes - - kubernetes_sd_configs: - - role: endpoints - api_server: 'https://localhost:1234' - tls_config: - cert_file: valid_cert_file - key_file: valid_key_file + - job_name: service-x basic_auth: - username: 'myusername' - password: 'mysecret' + username: admin_name + password: "multiline\nmysecret\ntest" -- job_name: service-kubernetes-namespaces + scrape_interval: 50s + scrape_timeout: 5s - kubernetes_sd_configs: - - role: endpoints - api_server: 'https://localhost:1234' - namespaces: - names: - - default + body_size_limit: 10MB + sample_limit: 1000 - basic_auth: - username: 'myusername' - password_file: valid_password_file + metrics_path: /my_path + scheme: https + dns_sd_configs: + - refresh_interval: 15s + names: + - first.dns.address.domain.com + - second.dns.address.domain.com + - names: + - first.dns.address.domain.com + # refresh_interval defaults to 30s. 
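(Aside: the conf.good.yml restructuring running through these hunks is driven by one rule in the new `.yamllint`: `indentation: {spaces: consistent}`, where the first indent step seen in a file becomes the required step for the whole file. A small sketch of that rule's behaviour, using a hypothetical scratch file and yamllint's `-d` inline-config flag:

    # Every sequence sits one step under its parent key, as in the hunks here;
    # this file lints clean under the "consistent" indentation rule.
    cat > /tmp/indent-check.yml <<'EOF'
    scrape_configs:
      - job_name: demo
        static_configs:
          - targets: ["localhost:9090"]
    EOF
    yamllint -d "{extends: default, rules: {document-start: disable, indentation: {spaces: consistent}}}" /tmp/indent-check.yml

The patch continues below.)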
-- job_name: service-marathon - marathon_sd_configs: - - servers: - - 'https://marathon.example.com:443' + relabel_configs: + - source_labels: [job] + regex: (.*)some-[regex] + action: drop + - source_labels: [__address__] + modulus: 8 + target_label: __tmp_hash + action: hashmod + - source_labels: [__tmp_hash] + regex: 1 + action: keep + - action: labelmap + regex: 1 + - action: labeldrop + regex: d + - action: labelkeep + regex: k + + metric_relabel_configs: + - source_labels: [__name__] + regex: expensive_metric.* + action: drop + + - job_name: service-y + + consul_sd_configs: + - server: "localhost:1234" + token: mysecret + services: ["nginx", "cache", "mysql"] + tags: ["canary", "v1"] + node_meta: + rack: "123" + allow_stale: true + scheme: https + tls_config: + ca_file: valid_ca_file + cert_file: valid_cert_file + key_file: valid_key_file + insecure_skip_verify: false + + relabel_configs: + - source_labels: [__meta_sd_consul_tags] + separator: "," + regex: label:([^=]+)=([^,]+) + target_label: ${1} + replacement: ${2} + + - job_name: service-z - auth_token: "mysecret" tls_config: cert_file: valid_cert_file key_file: valid_key_file -- job_name: service-ec2 - ec2_sd_configs: - - region: us-east-1 - access_key: access - secret_key: mysecret - profile: profile - filters: - - name: tag:environment - values: - - prod + authorization: + credentials: mysecret - - name: tag:service - values: - - web - - db + - job_name: service-kubernetes -- job_name: service-lightsail - lightsail_sd_configs: - - region: us-east-1 - access_key: access - secret_key: mysecret - profile: profile + kubernetes_sd_configs: + - role: endpoints + api_server: "https://localhost:1234" + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file -- job_name: service-azure - azure_sd_configs: - - environment: AzurePublicCloud - authentication_method: OAuth - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 - tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 - client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C - client_secret: mysecret - port: 9100 + basic_auth: + username: "myusername" + password: "mysecret" -- job_name: service-nerve - nerve_sd_configs: - - servers: - - localhost - paths: - - /monitoring + - job_name: service-kubernetes-namespaces -- job_name: 0123service-xxx - metrics_path: /metrics - static_configs: - - targets: - - localhost:9090 + kubernetes_sd_configs: + - role: endpoints + api_server: "https://localhost:1234" + namespaces: + names: + - default -- job_name: badfederation - honor_timestamps: false - metrics_path: /federate - static_configs: - - targets: - - localhost:9090 + basic_auth: + username: "myusername" + password_file: valid_password_file -- job_name: 測試 - metrics_path: /metrics - static_configs: - - targets: - - localhost:9090 + - job_name: service-marathon + marathon_sd_configs: + - servers: + - "https://marathon.example.com:443" -- job_name: httpsd - http_sd_configs: - - url: 'http://example.com/prometheus' + auth_token: "mysecret" + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file -- job_name: service-triton - triton_sd_configs: - - account: 'testAccount' - dns_suffix: 'triton.example.com' - endpoint: 'triton.example.com' - port: 9163 - refresh_interval: 1m - version: 1 - tls_config: - cert_file: valid_cert_file - key_file: valid_key_file + - job_name: service-ec2 + ec2_sd_configs: + - region: us-east-1 + access_key: access + secret_key: mysecret + profile: profile + filters: + - name: tag:environment + values: + - prod -- job_name: digitalocean-droplets - 
digitalocean_sd_configs: - - authorization: - credentials: abcdef + - name: tag:service + values: + - web + - db -- job_name: docker - docker_sd_configs: - - host: unix:///var/run/docker.sock + - job_name: service-lightsail + lightsail_sd_configs: + - region: us-east-1 + access_key: access + secret_key: mysecret + profile: profile -- job_name: dockerswarm - dockerswarm_sd_configs: - - host: http://127.0.0.1:2375 - role: nodes + - job_name: service-azure + azure_sd_configs: + - environment: AzurePublicCloud + authentication_method: OAuth + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 + tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 + client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C + client_secret: mysecret + port: 9100 -- job_name: service-openstack - openstack_sd_configs: - - role: instance - region: RegionOne - port: 80 - refresh_interval: 1m - tls_config: - ca_file: valid_ca_file - cert_file: valid_cert_file - key_file: valid_key_file + - job_name: service-nerve + nerve_sd_configs: + - servers: + - localhost + paths: + - /monitoring -- job_name: hetzner - hetzner_sd_configs: - - role: hcloud - authorization: - credentials: abcdef - - role: robot - basic_auth: - username: abcdef - password: abcdef + - job_name: 0123service-xxx + metrics_path: /metrics + static_configs: + - targets: + - localhost:9090 -- job_name: service-eureka - eureka_sd_configs: - - server: 'http://eureka.example.com:8761/eureka' + - job_name: badfederation + honor_timestamps: false + metrics_path: /federate + static_configs: + - targets: + - localhost:9090 -- job_name: scaleway - scaleway_sd_configs: - - role: instance - project_id: 11111111-1111-1111-1111-111111111112 - access_key: SCWXXXXXXXXXXXXXXXXX - secret_key: 11111111-1111-1111-1111-111111111111 - - role: baremetal - project_id: 11111111-1111-1111-1111-111111111112 - access_key: SCWXXXXXXXXXXXXXXXXX - secret_key: 11111111-1111-1111-1111-111111111111 + - job_name: 測試 + metrics_path: /metrics + static_configs: + - targets: + - localhost:9090 -- job_name: linode-instances - linode_sd_configs: - - authorization: - credentials: abcdef + - job_name: httpsd + http_sd_configs: + - url: "http://example.com/prometheus" + + - job_name: service-triton + triton_sd_configs: + - account: "testAccount" + dns_suffix: "triton.example.com" + endpoint: "triton.example.com" + port: 9163 + refresh_interval: 1m + version: 1 + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file + + - job_name: digitalocean-droplets + digitalocean_sd_configs: + - authorization: + credentials: abcdef + + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + + - job_name: dockerswarm + dockerswarm_sd_configs: + - host: http://127.0.0.1:2375 + role: nodes + + - job_name: service-openstack + openstack_sd_configs: + - role: instance + region: RegionOne + port: 80 + refresh_interval: 1m + tls_config: + ca_file: valid_ca_file + cert_file: valid_cert_file + key_file: valid_key_file + + - job_name: hetzner + hetzner_sd_configs: + - role: hcloud + authorization: + credentials: abcdef + - role: robot + basic_auth: + username: abcdef + password: abcdef + + - job_name: service-eureka + eureka_sd_configs: + - server: "http://eureka.example.com:8761/eureka" + + - job_name: scaleway + scaleway_sd_configs: + - role: instance + project_id: 11111111-1111-1111-1111-111111111112 + access_key: SCWXXXXXXXXXXXXXXXXX + secret_key: 11111111-1111-1111-1111-111111111111 + - role: baremetal + project_id: 11111111-1111-1111-1111-111111111112 + access_key: SCWXXXXXXXXXXXXXXXXX + 
secret_key: 11111111-1111-1111-1111-111111111111 + + - job_name: linode-instances + linode_sd_configs: + - authorization: + credentials: abcdef alerting: alertmanagers: - - scheme: https - static_configs: - - targets: - - "1.2.3.4:9093" - - "1.2.3.5:9093" - - "1.2.3.6:9093" + - scheme: https + static_configs: + - targets: + - "1.2.3.4:9093" + - "1.2.3.5:9093" + - "1.2.3.6:9093" diff --git a/config/testdata/ec2_filters_empty_values.bad.yml b/config/testdata/ec2_filters_empty_values.bad.yml index f375bf564..99ee473a8 100644 --- a/config/testdata/ec2_filters_empty_values.bad.yml +++ b/config/testdata/ec2_filters_empty_values.bad.yml @@ -2,8 +2,7 @@ scrape_configs: - job_name: prometheus ec2_sd_configs: - - region: 'us-east-1' - filters: - - name: 'tag:environment' - values: - + - region: "us-east-1" + filters: + - name: "tag:environment" + values: diff --git a/config/testdata/empty_alert_relabel_config.bad.yml b/config/testdata/empty_alert_relabel_config.bad.yml index b863bf23a..cbc5fa65e 100644 --- a/config/testdata/empty_alert_relabel_config.bad.yml +++ b/config/testdata/empty_alert_relabel_config.bad.yml @@ -1,3 +1,3 @@ alerting: alert_relabel_configs: - - + - diff --git a/config/testdata/empty_alertmanager_relabel_config.bad.yml b/config/testdata/empty_alertmanager_relabel_config.bad.yml index 6d99ac4dc..639bfdadf 100644 --- a/config/testdata/empty_alertmanager_relabel_config.bad.yml +++ b/config/testdata/empty_alertmanager_relabel_config.bad.yml @@ -1,4 +1,4 @@ alerting: alertmanagers: - - relabel_configs: - - + - relabel_configs: + - diff --git a/config/testdata/empty_metric_relabel_config.bad.yml b/config/testdata/empty_metric_relabel_config.bad.yml index d2485e352..f0b873075 100644 --- a/config/testdata/empty_metric_relabel_config.bad.yml +++ b/config/testdata/empty_metric_relabel_config.bad.yml @@ -1,4 +1,4 @@ scrape_configs: -- job_name: "test" - metric_relabel_configs: - - + - job_name: "test" + metric_relabel_configs: + - diff --git a/config/testdata/empty_rr_config.bad.yml b/config/testdata/empty_rr_config.bad.yml index e3bcca598..566a5b62b 100644 --- a/config/testdata/empty_rr_config.bad.yml +++ b/config/testdata/empty_rr_config.bad.yml @@ -1,2 +1,2 @@ remote_read: -- + - diff --git a/config/testdata/empty_rw_config.bad.yml b/config/testdata/empty_rw_config.bad.yml index 6f16030e6..8905fa38f 100644 --- a/config/testdata/empty_rw_config.bad.yml +++ b/config/testdata/empty_rw_config.bad.yml @@ -1,2 +1,2 @@ remote_write: -- + - diff --git a/config/testdata/empty_rw_relabel_config.bad.yml b/config/testdata/empty_rw_relabel_config.bad.yml index 6d5418290..62c61b9e7 100644 --- a/config/testdata/empty_rw_relabel_config.bad.yml +++ b/config/testdata/empty_rw_relabel_config.bad.yml @@ -1,4 +1,4 @@ remote_write: - url: "foo" write_relabel_configs: - - \ No newline at end of file + - diff --git a/config/testdata/empty_scrape_config.bad.yml b/config/testdata/empty_scrape_config.bad.yml index 8c300deaa..058496aea 100644 --- a/config/testdata/empty_scrape_config.bad.yml +++ b/config/testdata/empty_scrape_config.bad.yml @@ -1,2 +1,2 @@ scrape_configs: -- \ No newline at end of file + - diff --git a/config/testdata/empty_static_config.bad.yml b/config/testdata/empty_static_config.bad.yml index 464a0a6fb..12ed3c1ad 100644 --- a/config/testdata/empty_static_config.bad.yml +++ b/config/testdata/empty_static_config.bad.yml @@ -1,4 +1,4 @@ scrape_configs: -- job_name: "test" - static_configs: - - + - job_name: "test" + static_configs: + - diff --git 
a/config/testdata/empty_target_relabel_config.bad.yml b/config/testdata/empty_target_relabel_config.bad.yml index 7324b1041..85bd4e91d 100644 --- a/config/testdata/empty_target_relabel_config.bad.yml +++ b/config/testdata/empty_target_relabel_config.bad.yml @@ -1,4 +1,4 @@ scrape_configs: -- job_name: "test" - relabel_configs: - - + - job_name: "test" + relabel_configs: + - diff --git a/config/testdata/eureka_invalid_server.bad.yml b/config/testdata/eureka_invalid_server.bad.yml index 0c8ae428a..8d20df717 100644 --- a/config/testdata/eureka_invalid_server.bad.yml +++ b/config/testdata/eureka_invalid_server.bad.yml @@ -1,5 +1,4 @@ scrape_configs: - -- job_name: eureka - eureka_sd_configs: - - server: eureka.com + - job_name: eureka + eureka_sd_configs: + - server: eureka.com diff --git a/config/testdata/eureka_no_server.bad.yml b/config/testdata/eureka_no_server.bad.yml index 35c578a6c..8fccaa4e7 100644 --- a/config/testdata/eureka_no_server.bad.yml +++ b/config/testdata/eureka_no_server.bad.yml @@ -1,5 +1,4 @@ scrape_configs: - -- job_name: eureka - eureka_sd_configs: - - server: + - job_name: eureka + eureka_sd_configs: + - server: diff --git a/config/testdata/hetzner_role.bad.yml b/config/testdata/hetzner_role.bad.yml index 0a5cc8c48..4c67b8642 100644 --- a/config/testdata/hetzner_role.bad.yml +++ b/config/testdata/hetzner_role.bad.yml @@ -1,4 +1,3 @@ scrape_configs: -- hetzner_sd_configs: - - role: invalid - + - hetzner_sd_configs: + - role: invalid diff --git a/config/testdata/http_url_bad_scheme.bad.yml b/config/testdata/http_url_bad_scheme.bad.yml index eca8024c0..9245b3ebe 100644 --- a/config/testdata/http_url_bad_scheme.bad.yml +++ b/config/testdata/http_url_bad_scheme.bad.yml @@ -1,3 +1,3 @@ scrape_configs: -- http_sd_configs: - - url: ftp://example.com + - http_sd_configs: + - url: ftp://example.com diff --git a/config/testdata/http_url_no_host.bad.yml b/config/testdata/http_url_no_host.bad.yml index e1ee14d87..99d4b746f 100644 --- a/config/testdata/http_url_no_host.bad.yml +++ b/config/testdata/http_url_no_host.bad.yml @@ -1,3 +1,3 @@ scrape_configs: -- http_sd_configs: - - url: http:// + - http_sd_configs: + - url: http:// diff --git a/config/testdata/http_url_no_scheme.bad.yml b/config/testdata/http_url_no_scheme.bad.yml index bb6fc8384..3725d3cc0 100644 --- a/config/testdata/http_url_no_scheme.bad.yml +++ b/config/testdata/http_url_no_scheme.bad.yml @@ -1,3 +1,3 @@ scrape_configs: -- http_sd_configs: - - url: invalid + - http_sd_configs: + - url: invalid diff --git a/config/testdata/kubernetes_authorization_basicauth.bad.yml b/config/testdata/kubernetes_authorization_basicauth.bad.yml index cfa39bd6d..cbed88e8b 100644 --- a/config/testdata/kubernetes_authorization_basicauth.bad.yml +++ b/config/testdata/kubernetes_authorization_basicauth.bad.yml @@ -2,12 +2,11 @@ scrape_configs: - job_name: prometheus kubernetes_sd_configs: - - role: pod - api_server: 'https://localhost:1234' + - role: pod + api_server: "https://localhost:1234" - authorization: + authorization: credentials: 1234 - basic_auth: - username: user - password: password - + basic_auth: + username: user + password: password diff --git a/config/testdata/kubernetes_bearertoken.bad.yml b/config/testdata/kubernetes_bearertoken.bad.yml index 158de9a1d..424b8498d 100644 --- a/config/testdata/kubernetes_bearertoken.bad.yml +++ b/config/testdata/kubernetes_bearertoken.bad.yml @@ -2,9 +2,8 @@ scrape_configs: - job_name: prometheus kubernetes_sd_configs: - - role: node - api_server: 'https://localhost:1234' - - bearer_token: 
1234 - bearer_token_file: somefile + - role: node + api_server: "https://localhost:1234" + bearer_token: 1234 + bearer_token_file: somefile diff --git a/config/testdata/kubernetes_bearertoken_basicauth.bad.yml b/config/testdata/kubernetes_bearertoken_basicauth.bad.yml index ad7cc329d..d4ff2ddf7 100644 --- a/config/testdata/kubernetes_bearertoken_basicauth.bad.yml +++ b/config/testdata/kubernetes_bearertoken_basicauth.bad.yml @@ -2,11 +2,10 @@ scrape_configs: - job_name: prometheus kubernetes_sd_configs: - - role: pod - api_server: 'https://localhost:1234' - - bearer_token: 1234 - basic_auth: - username: user - password: password + - role: pod + api_server: "https://localhost:1234" + bearer_token: 1234 + basic_auth: + username: user + password: password diff --git a/config/testdata/kubernetes_empty_apiserver.good.yml b/config/testdata/kubernetes_empty_apiserver.good.yml index 12b428eb8..d17283f86 100644 --- a/config/testdata/kubernetes_empty_apiserver.good.yml +++ b/config/testdata/kubernetes_empty_apiserver.good.yml @@ -1,4 +1,4 @@ scrape_configs: -- job_name: prometheus - kubernetes_sd_configs: - - role: endpoints + - job_name: prometheus + kubernetes_sd_configs: + - role: endpoints diff --git a/config/testdata/kubernetes_http_config_without_api_server.bad.yml b/config/testdata/kubernetes_http_config_without_api_server.bad.yml index 30cc3c3d7..65302d46c 100644 --- a/config/testdata/kubernetes_http_config_without_api_server.bad.yml +++ b/config/testdata/kubernetes_http_config_without_api_server.bad.yml @@ -1,6 +1,6 @@ scrape_configs: - job_name: prometheus kubernetes_sd_configs: - - role: pod - authorization: - credentials: 1234 + - role: pod + authorization: + credentials: 1234 diff --git a/config/testdata/kubernetes_namespace_discovery.bad.yml b/config/testdata/kubernetes_namespace_discovery.bad.yml index c98d65d34..4a79cc00d 100644 --- a/config/testdata/kubernetes_namespace_discovery.bad.yml +++ b/config/testdata/kubernetes_namespace_discovery.bad.yml @@ -1,6 +1,6 @@ scrape_configs: -- kubernetes_sd_configs: - - api_server: kubernetes:443 - role: endpoints - namespaces: - foo: bar + - kubernetes_sd_configs: + - api_server: kubernetes:443 + role: endpoints + namespaces: + foo: bar diff --git a/config/testdata/kubernetes_role.bad.yml b/config/testdata/kubernetes_role.bad.yml index ae924d8f4..67ce2ec3b 100644 --- a/config/testdata/kubernetes_role.bad.yml +++ b/config/testdata/kubernetes_role.bad.yml @@ -1,5 +1,4 @@ scrape_configs: -- kubernetes_sd_configs: - - api_server: kubernetes:443 - role: vacation - + - kubernetes_sd_configs: + - api_server: kubernetes:443 + role: vacation diff --git a/config/testdata/kubernetes_selectors_duplicated_role.bad.yml b/config/testdata/kubernetes_selectors_duplicated_role.bad.yml index dc3dec25c..c88c1baa8 100644 --- a/config/testdata/kubernetes_selectors_duplicated_role.bad.yml +++ b/config/testdata/kubernetes_selectors_duplicated_role.bad.yml @@ -1,11 +1,11 @@ scrape_configs: -- job_name: prometheus - kubernetes_sd_configs: - - role: endpoints - selectors: - - role: "pod" - label: "foo=bar" - field: "metadata.status=Running" - - role: "pod" - label: "foo=bar" - field: "metadata.status=Running" + - job_name: prometheus + kubernetes_sd_configs: + - role: endpoints + selectors: + - role: "pod" + label: "foo=bar" + field: "metadata.status=Running" + - role: "pod" + label: "foo=bar" + field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_endpoints.bad.yml b/config/testdata/kubernetes_selectors_endpoints.bad.yml index 
d2128f374..6981fa77e 100644 --- a/config/testdata/kubernetes_selectors_endpoints.bad.yml +++ b/config/testdata/kubernetes_selectors_endpoints.bad.yml @@ -3,12 +3,12 @@ scrape_configs: kubernetes_sd_configs: - role: endpoints selectors: - - role: "node" - label: "foo=bar" - field: "metadata.status=Running" - - role: "service" - label: "foo=bar" - field: "metadata.status=Running" - - role: "endpoints" - label: "foo=bar" - field: "metadata.status=Running" + - role: "node" + label: "foo=bar" + field: "metadata.status=Running" + - role: "service" + label: "foo=bar" + field: "metadata.status=Running" + - role: "endpoints" + label: "foo=bar" + field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_endpoints.good.yml b/config/testdata/kubernetes_selectors_endpoints.good.yml index d0b22ff93..dfd25ce70 100644 --- a/config/testdata/kubernetes_selectors_endpoints.good.yml +++ b/config/testdata/kubernetes_selectors_endpoints.good.yml @@ -3,12 +3,12 @@ scrape_configs: kubernetes_sd_configs: - role: endpoints selectors: - - role: "pod" - label: "foo=bar" - field: "metadata.status=Running" - - role: "service" - label: "foo=bar" - field: "metadata.status=Running" - - role: "endpoints" - label: "foo=bar" - field: "metadata.status=Running" + - role: "pod" + label: "foo=bar" + field: "metadata.status=Running" + - role: "service" + label: "foo=bar" + field: "metadata.status=Running" + - role: "endpoints" + label: "foo=bar" + field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml b/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml index 72d2d90e6..3ea79e882 100644 --- a/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml +++ b/config/testdata/kubernetes_selectors_incorrect_selector.bad.yml @@ -1,7 +1,7 @@ scrape_configs: -- job_name: prometheus - kubernetes_sd_configs: - - role: endpoints - selectors: - - role: "pod" - field: "metadata.status-Running" + - job_name: prometheus + kubernetes_sd_configs: + - role: endpoints + selectors: + - role: "pod" + field: "metadata.status-Running" diff --git a/config/testdata/kubernetes_selectors_ingress.bad.yml b/config/testdata/kubernetes_selectors_ingress.bad.yml index 7698de35a..d65277004 100644 --- a/config/testdata/kubernetes_selectors_ingress.bad.yml +++ b/config/testdata/kubernetes_selectors_ingress.bad.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: ingress selectors: - - role: "node" + - role: "node" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_ingress.good.yml b/config/testdata/kubernetes_selectors_ingress.good.yml index 6aa1edf20..4b6dab766 100644 --- a/config/testdata/kubernetes_selectors_ingress.good.yml +++ b/config/testdata/kubernetes_selectors_ingress.good.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: ingress selectors: - - role: "ingress" + - role: "ingress" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_node.bad.yml b/config/testdata/kubernetes_selectors_node.bad.yml index 969435151..841f5aa43 100644 --- a/config/testdata/kubernetes_selectors_node.bad.yml +++ b/config/testdata/kubernetes_selectors_node.bad.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: node selectors: - - role: "pod" + - role: "pod" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_node.good.yml b/config/testdata/kubernetes_selectors_node.good.yml index 
d355c62ab..bd62eb1e8 100644 --- a/config/testdata/kubernetes_selectors_node.good.yml +++ b/config/testdata/kubernetes_selectors_node.good.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: node selectors: - - role: "node" + - role: "node" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_pod.bad.yml b/config/testdata/kubernetes_selectors_pod.bad.yml index daa7290a4..3a1a83abd 100644 --- a/config/testdata/kubernetes_selectors_pod.bad.yml +++ b/config/testdata/kubernetes_selectors_pod.bad.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: pod selectors: - - role: "node" + - role: "node" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_pod.good.yml b/config/testdata/kubernetes_selectors_pod.good.yml index 73ef8802a..91da6ada1 100644 --- a/config/testdata/kubernetes_selectors_pod.good.yml +++ b/config/testdata/kubernetes_selectors_pod.good.yml @@ -3,11 +3,11 @@ scrape_configs: kubernetes_sd_configs: - role: pod selectors: - - role: "pod" + - role: "pod" label: "foo=bar" field: "metadata.status=Running" - role: pod selectors: - - role: "pod" + - role: "pod" label: "foo in (bar,baz)" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_service.bad.yml b/config/testdata/kubernetes_selectors_service.bad.yml index 78179be69..d67e97a2d 100644 --- a/config/testdata/kubernetes_selectors_service.bad.yml +++ b/config/testdata/kubernetes_selectors_service.bad.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: service selectors: - - role: "pod" + - role: "pod" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/kubernetes_selectors_service.good.yml b/config/testdata/kubernetes_selectors_service.good.yml index 9c7705dc6..11bbf31f5 100644 --- a/config/testdata/kubernetes_selectors_service.good.yml +++ b/config/testdata/kubernetes_selectors_service.good.yml @@ -3,6 +3,6 @@ scrape_configs: kubernetes_sd_configs: - role: service selectors: - - role: "service" + - role: "service" label: "foo=bar" field: "metadata.status=Running" diff --git a/config/testdata/labeldrop2.bad.yml b/config/testdata/labeldrop2.bad.yml index f70316975..41d3fc631 100644 --- a/config/testdata/labeldrop2.bad.yml +++ b/config/testdata/labeldrop2.bad.yml @@ -1,5 +1,5 @@ scrape_configs: - job_name: prometheus relabel_configs: - - modulus: 8 + - modulus: 8 action: labeldrop diff --git a/config/testdata/labeldrop3.bad.yml b/config/testdata/labeldrop3.bad.yml index 5bed5d0af..6fb800e95 100644 --- a/config/testdata/labeldrop3.bad.yml +++ b/config/testdata/labeldrop3.bad.yml @@ -1,5 +1,5 @@ scrape_configs: - job_name: prometheus relabel_configs: - - separator: ',' + - separator: "," action: labeldrop diff --git a/config/testdata/labeldrop4.bad.yml b/config/testdata/labeldrop4.bad.yml index 52877d2b4..b91a0fcc4 100644 --- a/config/testdata/labeldrop4.bad.yml +++ b/config/testdata/labeldrop4.bad.yml @@ -1,5 +1,5 @@ scrape_configs: - job_name: prometheus relabel_configs: - - replacement: yolo-{1} + - replacement: yolo-{1} action: labeldrop diff --git a/config/testdata/labelkeep2.bad.yml b/config/testdata/labelkeep2.bad.yml index 734e537cf..57665aa39 100644 --- a/config/testdata/labelkeep2.bad.yml +++ b/config/testdata/labelkeep2.bad.yml @@ -1,5 +1,5 @@ scrape_configs: - job_name: prometheus relabel_configs: - - modulus: 8 + - modulus: 8 action: labelkeep diff --git a/config/testdata/labelkeep3.bad.yml b/config/testdata/labelkeep3.bad.yml index 
diff --git a/config/testdata/labelkeep3.bad.yml b/config/testdata/labelkeep3.bad.yml
index 407a0f7c1..5cf3ceb26 100644
--- a/config/testdata/labelkeep3.bad.yml
+++ b/config/testdata/labelkeep3.bad.yml
@@ -1,5 +1,5 @@
 scrape_configs:
   - job_name: prometheus
     relabel_configs:
-      - separator: ','
+      - separator: ","
         action: labelkeep
diff --git a/config/testdata/labelkeep4.bad.yml b/config/testdata/labelkeep4.bad.yml
index 4e7799415..1c3793ef1 100644
--- a/config/testdata/labelkeep4.bad.yml
+++ b/config/testdata/labelkeep4.bad.yml
@@ -1,5 +1,5 @@
 scrape_configs:
   - job_name: prometheus
     relabel_configs:
-      - replacement: yolo-{1} 
+      - replacement: yolo-{1}
         action: labelkeep
diff --git a/config/testdata/labelvalue.bad.yml b/config/testdata/labelvalue.bad.yml
index 7873eb174..44197d84b 100644
--- a/config/testdata/labelvalue.bad.yml
+++ b/config/testdata/labelvalue.bad.yml
@@ -1,3 +1,3 @@
 global:
   external_labels:
-    name: !!binary "/w=="
\ No newline at end of file
+    name: !!binary "/w=="
diff --git a/config/testdata/marathon_authtoken_authorization.bad.yml b/config/testdata/marathon_authtoken_authorization.bad.yml
index d3112b12c..9a8238641 100644
--- a/config/testdata/marathon_authtoken_authorization.bad.yml
+++ b/config/testdata/marathon_authtoken_authorization.bad.yml
@@ -2,9 +2,9 @@ scrape_configs:
   - job_name: prometheus
 
     marathon_sd_configs:
-    - servers:
-      - 'https://localhost:1234'
+      - servers:
+          - "https://localhost:1234"
 
-    auth_token: 1234 
-    authorization: 
-      credentials: 4567 
+    auth_token: 1234
+    authorization:
+      credentials: 4567
diff --git a/config/testdata/marathon_authtoken_authtokenfile.bad.yml b/config/testdata/marathon_authtoken_authtokenfile.bad.yml
index b31c6f154..a671dff73 100644
--- a/config/testdata/marathon_authtoken_authtokenfile.bad.yml
+++ b/config/testdata/marathon_authtoken_authtokenfile.bad.yml
@@ -2,8 +2,8 @@ scrape_configs:
   - job_name: prometheus
 
     marathon_sd_configs:
-    - servers:
-      - 'https://localhost:1234'
+      - servers:
+          - "https://localhost:1234"
 
-    auth_token: 1234 
-    auth_token_file: somefile 
+    auth_token: 1234
+    auth_token_file: somefile
diff --git a/config/testdata/marathon_authtoken_basicauth.bad.yml b/config/testdata/marathon_authtoken_basicauth.bad.yml
index 64300f407..c14699716 100644
--- a/config/testdata/marathon_authtoken_basicauth.bad.yml
+++ b/config/testdata/marathon_authtoken_basicauth.bad.yml
@@ -2,10 +2,10 @@ scrape_configs:
   - job_name: prometheus
 
     marathon_sd_configs:
-    - servers:
-      - 'https://localhost:1234'
+      - servers:
+          - "https://localhost:1234"
 
-    auth_token: 1234 
-    basic_auth: 
-      username: user 
-      password: password 
+    auth_token: 1234
+    basic_auth:
+      username: user
+      password: password
diff --git a/config/testdata/marathon_authtoken_bearertoken.bad.yml b/config/testdata/marathon_authtoken_bearertoken.bad.yml
index 36eeb801e..7a263ae82 100644
--- a/config/testdata/marathon_authtoken_bearertoken.bad.yml
+++ b/config/testdata/marathon_authtoken_bearertoken.bad.yml
@@ -2,8 +2,8 @@ scrape_configs:
   - job_name: prometheus
 
     marathon_sd_configs:
-    - servers:
-      - 'https://localhost:1234'
+      - servers:
+          - "https://localhost:1234"
 
-    auth_token: 1234 
-    bearer_token: 4567 
+    auth_token: 1234
+    bearer_token: 4567
diff --git a/config/testdata/marathon_no_servers.bad.yml b/config/testdata/marathon_no_servers.bad.yml
index df245e91b..b02b5b1bf 100644
--- a/config/testdata/marathon_no_servers.bad.yml
+++ b/config/testdata/marathon_no_servers.bad.yml
@@ -1,10 +1,9 @@
 # my global config
 global:
-  scrape_interval: 15s 
+  scrape_interval: 15s
   evaluation_interval: 30s
 
 scrape_configs:
-
-- job_name: service-marathon
-  marathon_sd_configs:
-  - servers:
+  - job_name: service-marathon
+    marathon_sd_configs:
+      - servers:
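A note on the labelvalue fixture above: it stays a negative fixture after reformatting, because `!!binary "/w=="` decodes to the single byte 0xFF, which is not valid UTF-8 and is therefore rejected as an external label value at config load; the hunk only adds the missing final newline. For reference:

```yaml
# /w== is base64 for 0xFF, an invalid UTF-8 label value; only the
# trailing newline changed in this diff.
global:
  external_labels:
    name: !!binary "/w=="
```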
diff --git a/config/testdata/openstack_availability.bad.yml b/config/testdata/openstack_availability.bad.yml
index 0ed51b44c..1de623e4e 100644
--- a/config/testdata/openstack_availability.bad.yml
+++ b/config/testdata/openstack_availability.bad.yml
@@ -1,4 +1,3 @@
 scrape_configs:
-- openstack_sd_configs:
-  - availability: invalid
-
+  - openstack_sd_configs:
+      - availability: invalid
diff --git a/config/testdata/openstack_role.bad.yml b/config/testdata/openstack_role.bad.yml
index 6c607d8c6..74cf3765a 100644
--- a/config/testdata/openstack_role.bad.yml
+++ b/config/testdata/openstack_role.bad.yml
@@ -1,4 +1,3 @@
 scrape_configs:
-- openstack_sd_configs:
-  - role: invalid
-
+  - openstack_sd_configs:
+      - role: invalid
diff --git a/config/testdata/remote_read_header.bad.yml b/config/testdata/remote_read_header.bad.yml
index 116b63ce1..7aa69ecf1 100644
--- a/config/testdata/remote_read_header.bad.yml
+++ b/config/testdata/remote_read_header.bad.yml
@@ -2,4 +2,4 @@ remote_read:
 - url: localhost:9090
   name: queue1
   headers:
-    "x-prometheus-remote-write-version": "somehack"
\ No newline at end of file
+    "x-prometheus-remote-write-version": "somehack"
diff --git a/config/testdata/remote_write_dup.bad.yml b/config/testdata/remote_write_dup.bad.yml
index 1fdc093ac..6cf8b39e0 100644
--- a/config/testdata/remote_write_dup.bad.yml
+++ b/config/testdata/remote_write_dup.bad.yml
@@ -3,4 +3,3 @@ remote_write:
   name: queue1
 - url: localhost:9091
   name: queue1
-
diff --git a/config/testdata/roundtrip.good.yml b/config/testdata/roundtrip.good.yml
index 19690bb69..51b59dd81 100644
--- a/config/testdata/roundtrip.good.yml
+++ b/config/testdata/roundtrip.good.yml
@@ -1,147 +1,146 @@
 alerting:
   alertmanagers:
-  - scheme: https
+    - scheme: https
+
+      file_sd_configs:
+        - files:
+            - foo/*.slow.json
+            - foo/*.slow.yml
+          refresh_interval: 10m
+        - files:
+            - bar/*.yaml
+
+      static_configs:
+        - targets:
+            - 1.2.3.4:9093
+            - 1.2.3.5:9093
+            - 1.2.3.6:9093
+
+scrape_configs:
+  - job_name: foo
+    static_configs:
+      - targets:
+          - localhost:9090
+          - localhost:9191
+        labels:
+          my: label
+          your: label
+
+  - job_name: bar
+
+    azure_sd_configs:
+      - environment: AzurePublicCloud
+        authentication_method: OAuth
+        subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
+        tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
+        client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
+        client_secret: <secret>
+        port: 9100
+
+    consul_sd_configs:
+      - server: localhost:1234
+        token: <secret>
+        services: [nginx, cache, mysql]
+        tags: [canary, v1]
+        node_meta:
+          rack: "123"
+        allow_stale: true
+        scheme: https
+        tls_config:
+          ca_file: valid_ca_file
+          cert_file: valid_cert_file
+          key_file: valid_key_file
+
+    digitalocean_sd_configs:
+      - authorization:
+          credentials: <secret>
+
+    docker_sd_configs:
+      - host: unix:///var/run/docker.sock
+
+    dockerswarm_sd_configs:
+      - host: http://127.0.0.1:2375
+        role: nodes
+
+    dns_sd_configs:
+      - refresh_interval: 15s
+        names:
+          - first.dns.address.domain.com
+          - second.dns.address.domain.com
+      - names:
+          - first.dns.address.domain.com
+
+    ec2_sd_configs:
+      - region: us-east-1
+        access_key: access
+        secret_key: <secret>
+        profile: profile
+        filters:
+          - name: tag:environment
+            values:
+              - prod
+          - name: tag:service
+            values:
+              - web
+              - db
 
     file_sd_configs:
-    - files:
-      - foo/*.slow.json
-      - foo/*.slow.yml
-      refresh_interval: 10m
-    - files:
-      - bar/*.yaml
+      - files:
+          - single/file.yml
+
+    kubernetes_sd_configs:
+      - role: endpoints
+        api_server: https://localhost:1234
+        tls_config:
+          cert_file: valid_cert_file
+          key_file: valid_key_file
+        basic_auth:
+          username: username
+          password: <secret>
+      - role: endpoints
+        api_server: https://localhost:1234
+        namespaces:
+          names:
+            - default
+        basic_auth:
+          username: username
+          password_file: valid_password_file
+
+    marathon_sd_configs:
+      - servers:
+          - https://marathon.example.com:443
+        auth_token: <secret>
+        tls_config:
+          cert_file: valid_cert_file
+          key_file: valid_key_file
+
+    nerve_sd_configs:
+      - servers:
+          - localhost
+        paths:
+          - /monitoring
+
+    openstack_sd_configs:
+      - role: instance
+        region: RegionOne
+        port: 80
+        refresh_interval: 1m
+        tls_config:
+          ca_file: valid_ca_file
+          cert_file: valid_cert_file
+          key_file: valid_key_file
 
     static_configs:
       - targets:
-          - 1.2.3.4:9093
-          - 1.2.3.5:9093
-          - 1.2.3.6:9093
+          - localhost:9093
 
-scrape_configs:
-
-- job_name: foo
-  static_configs:
-  - targets:
-    - localhost:9090
-    - localhost:9191
-    labels:
-      my: label
-      your: label
-
-- job_name: bar
-
-  azure_sd_configs:
-  - environment: AzurePublicCloud
-    authentication_method: OAuth
-    subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
-    tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
-    client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
-    client_secret: <secret>
-    port: 9100
-
-  consul_sd_configs:
-  - server: localhost:1234
-    token: <secret>
-    services: [nginx, cache, mysql]
-    tags: [canary, v1]
-    node_meta:
-      rack: "123"
-    allow_stale: true
-    scheme: https
-    tls_config:
-      ca_file: valid_ca_file
-      cert_file: valid_cert_file
-      key_file: valid_key_file
-
-  digitalocean_sd_configs:
-  - authorization:
-      credentials: <secret>
-
-  docker_sd_configs:
-  - host: unix:///var/run/docker.sock
-
-  dockerswarm_sd_configs:
-  - host: http://127.0.0.1:2375
-    role: nodes
-
-  dns_sd_configs:
-  - refresh_interval: 15s
-    names:
-    - first.dns.address.domain.com
-    - second.dns.address.domain.com
-  - names:
-    - first.dns.address.domain.com
-
-  ec2_sd_configs:
-  - region: us-east-1
-    access_key: access
-    secret_key: <secret>
-    profile: profile
-    filters:
-    - name: tag:environment
-      values:
-      - prod
-    - name: tag:service
-      values:
-      - web
-      - db
-
-  file_sd_configs:
-  - files:
-    - single/file.yml
-
-  kubernetes_sd_configs:
-  - role: endpoints
-    api_server: https://localhost:1234
-    tls_config:
-      cert_file: valid_cert_file
-      key_file: valid_key_file
-    basic_auth:
-      username: username
-      password: <secret>
-  - role: endpoints
-    api_server: https://localhost:1234
-    namespaces:
-      names:
-      - default
-    basic_auth:
-      username: username
-      password_file: valid_password_file
-
-  marathon_sd_configs:
-  - servers:
-    - https://marathon.example.com:443
-    auth_token: <secret>
-    tls_config:
-      cert_file: valid_cert_file
-      key_file: valid_key_file
-
-  nerve_sd_configs:
-  - servers:
-    - localhost
-    paths:
-    - /monitoring
-
-  openstack_sd_configs:
-  - role: instance
-    region: RegionOne
-    port: 80
-    refresh_interval: 1m
-    tls_config:
-      ca_file: valid_ca_file
-      cert_file: valid_cert_file
-      key_file: valid_key_file
-
-  static_configs:
-  - targets:
-    - localhost:9093
-
-  triton_sd_configs:
-  - account: testAccount
-    dns_suffix: triton.example.com
-    endpoint: triton.example.com
-    port: 9163
-    refresh_interval: 1m
-    version: 1
-    tls_config:
-      cert_file: valid_cert_file
-      key_file: valid_key_file
+    triton_sd_configs:
+      - account: testAccount
+        dns_suffix: triton.example.com
+        endpoint: triton.example.com
+        port: 9163
+        refresh_interval: 1m
+        version: 1
+        tls_config:
+          cert_file: valid_cert_file
+          key_file: valid_key_file
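Context for roundtrip.good.yml above: it exercises unmarshal-then-marshal stability across every SD mechanism, which is presumably why secret-typed fields carry the literal `<secret>` placeholder that Prometheus emits when marshalling secrets. In isolation:

```yaml
# Secret fields round-trip as the masked placeholder <secret>, so the
# re-marshalled config can be compared against this file; even the
# indentation style matters to that comparison.
consul_sd_configs:
  - server: localhost:1234
    token: <secret>
```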
diff --git a/config/testdata/scaleway_no_secret.bad.yml b/config/testdata/scaleway_no_secret.bad.yml
index 4b30af178..62569bdcc 100644
--- a/config/testdata/scaleway_no_secret.bad.yml
+++ b/config/testdata/scaleway_no_secret.bad.yml
@@ -1,5 +1,5 @@
 scrape_configs:
-- scaleway_sd_configs:
-  - role: instance
-    project_id: 11111111-1111-1111-1111-111111111112
-    access_key: SCWXXXXXXXXXXXXXXXXX
+  - scaleway_sd_configs:
+      - role: instance
+        project_id: 11111111-1111-1111-1111-111111111112
+        access_key: SCWXXXXXXXXXXXXXXXXX
diff --git a/config/testdata/scaleway_role.bad.yml b/config/testdata/scaleway_role.bad.yml
index 83e51d04a..192ca6b2e 100644
--- a/config/testdata/scaleway_role.bad.yml
+++ b/config/testdata/scaleway_role.bad.yml
@@ -1,7 +1,6 @@
 scrape_configs:
-- scaleway_sd_configs:
-  - role: invalid
-    project_id: 11111111-1111-1111-1111-111111111112
-    access_key: SCWXXXXXXXXXXXXXXXXX
-    secret_key_file: bar
-
+  - scaleway_sd_configs:
+      - role: invalid
+        project_id: 11111111-1111-1111-1111-111111111112
+        access_key: SCWXXXXXXXXXXXXXXXXX
+        secret_key_file: bar
diff --git a/config/testdata/scaleway_two_secrets.bad.yml b/config/testdata/scaleway_two_secrets.bad.yml
index d3344d341..3b38fab69 100644
--- a/config/testdata/scaleway_two_secrets.bad.yml
+++ b/config/testdata/scaleway_two_secrets.bad.yml
@@ -1,8 +1,7 @@
 scrape_configs:
-- scaleway_sd_configs:
-  - role: instance
-    project_id: 11111111-1111-1111-1111-111111111112
-    access_key: SCWXXXXXXXXXXXXXXXXX
-    secret_key_file: bar
-    secret_key: 11111111-1111-1111-1111-111111111112
-
+  - scaleway_sd_configs:
+      - role: instance
+        project_id: 11111111-1111-1111-1111-111111111112
+        access_key: SCWXXXXXXXXXXXXXXXXX
+        secret_key_file: bar
+        secret_key: 11111111-1111-1111-1111-111111111112
diff --git a/config/testdata/scrape_body_size_limit.bad.yml b/config/testdata/scrape_body_size_limit.bad.yml
index f463e1c04..20dfa369b 100644
--- a/config/testdata/scrape_body_size_limit.bad.yml
+++ b/config/testdata/scrape_body_size_limit.bad.yml
@@ -1,3 +1,3 @@
 scrape_configs:
-- job_name: prometheus
-  body_size_limit: 100
+  - job_name: prometheus
+    body_size_limit: 100
diff --git a/config/testdata/scrape_interval.bad.yml b/config/testdata/scrape_interval.bad.yml
index b334c7775..eb9ca6b4f 100644
--- a/config/testdata/scrape_interval.bad.yml
+++ b/config/testdata/scrape_interval.bad.yml
@@ -1,4 +1,4 @@
 scrape_configs:
-- job_name: prometheus
-  scrape_interval: 5s
-  scrape_timeout: 6s
+  - job_name: prometheus
+    scrape_interval: 5s
+    scrape_timeout: 6s
diff --git a/config/testdata/scrape_interval_larger.good.yml b/config/testdata/scrape_interval_larger.good.yml
index c31c795a9..37abba908 100644
--- a/config/testdata/scrape_interval_larger.good.yml
+++ b/config/testdata/scrape_interval_larger.good.yml
@@ -1,16 +1,16 @@
 global:
-  scrape_interval: 15s 
-  scrape_timeout: 15s 
+  scrape_interval: 15s
+  scrape_timeout: 15s
 
 scrape_configs:
-- job_name: prometheus
+  - job_name: prometheus
 
-  scrape_interval: 5s
+    scrape_interval: 5s
 
-  dns_sd_configs:
-  - refresh_interval: 15s
-    names:
-    - first.dns.address.domain.com
-    - second.dns.address.domain.com
-  - names:
-    - first.dns.address.domain.com
\ No newline at end of file
+    dns_sd_configs:
+      - refresh_interval: 15s
+        names:
+          - first.dns.address.domain.com
+          - second.dns.address.domain.com
+      - names:
+          - first.dns.address.domain.com
diff --git a/config/testdata/target_label_hashmod_missing.bad.yml b/config/testdata/target_label_hashmod_missing.bad.yml
index c919ac707..56a2053c4 100644
--- a/config/testdata/target_label_hashmod_missing.bad.yml
+++ b/config/testdata/target_label_hashmod_missing.bad.yml
@@ -2,5 +2,5 @@ scrape_configs:
   - job_name: prometheus
     relabel_configs:
      - source_labels: [__address__]
-        modulus: 8 
-        action: hashmod
+        modulus: 8
+        action: hashmod
diff --git a/config/testdata/unknown_attr.bad.yml b/config/testdata/unknown_attr.bad.yml
index 8a53075f6..738c73454 100644
--- a/config/testdata/unknown_attr.bad.yml
+++ b/config/testdata/unknown_attr.bad.yml
@@ -1,12 +1,12 @@
 # my global config
 global:
-  scrape_interval: 15s 
+  scrape_interval: 15s
   evaluation_interval: 30s
   # scrape_timeout is set to the global default (10s).
 
   external_labels:
-    monitor: codelab 
-    foo: bar 
+    monitor: codelab
+    foo: bar
 
 rule_files:
   - "first.rules"
@@ -17,4 +17,4 @@ scrape_configs:
   - job_name: prometheus
 
     consult_sd_configs:
-      - server: 'localhost:1234'
+      - server: "localhost:1234"
diff --git a/config/testdata/url_in_targetgroup.bad.yml b/config/testdata/url_in_targetgroup.bad.yml
index a57d757db..acb13bee7 100644
--- a/config/testdata/url_in_targetgroup.bad.yml
+++ b/config/testdata/url_in_targetgroup.bad.yml
@@ -1,5 +1,5 @@
 scrape_configs:
-- job_name: prometheus
-  static_configs:
-  - targets:
-    - http://bad
+  - job_name: prometheus
+    static_configs:
+      - targets:
+          - http://bad
diff --git a/discovery/file/fixtures/valid3.yml b/discovery/file/fixtures/valid3.yml
index c4f521480..7b76c25e4 100644
--- a/discovery/file/fixtures/valid3.yml
+++ b/discovery/file/fixtures/valid3.yml
@@ -1,7 +1,6 @@
 # the YAML structure is identical to valid.yml but the raw data is different.
-- targets: ['localhost:9090', 'example.org:443']
+- targets: ["localhost:9090", "example.org:443"]
   labels:
     foo: bar
-- targets: ['my.domain']
-
+- targets: ["my.domain"]
diff --git a/documentation/examples/kubernetes-rabbitmq/rc.yml b/documentation/examples/kubernetes-rabbitmq/rc.yml
index b3ed2a40a..622f3f9be 100644
--- a/documentation/examples/kubernetes-rabbitmq/rc.yml
+++ b/documentation/examples/kubernetes-rabbitmq/rc.yml
@@ -13,15 +13,15 @@ spec:
         app: rabbitmq
     spec:
       containers:
-      - image: rabbitmq:3.5.4-management
-        name: rabbitmq
-        ports:
-        - containerPort: 5672
-          name: service
-        - containerPort: 15672
-          name: management
-      - image: kbudde/rabbitmq-exporter
-        name: rabbitmq-exporter
-        ports:
-        - containerPort: 9090
-          name: exporter
+        - image: rabbitmq:3.5.4-management
+          name: rabbitmq
+          ports:
+            - containerPort: 5672
+              name: service
+            - containerPort: 15672
+              name: management
+        - image: kbudde/rabbitmq-exporter
+          name: rabbitmq-exporter
+          ports:
+            - containerPort: 9090
+              name: exporter
diff --git a/documentation/examples/kubernetes-rabbitmq/svc.yml b/documentation/examples/kubernetes-rabbitmq/svc.yml
index f965a3c89..2f84444ab 100644
--- a/documentation/examples/kubernetes-rabbitmq/svc.yml
+++ b/documentation/examples/kubernetes-rabbitmq/svc.yml
@@ -6,9 +6,9 @@ metadata:
   name: rabbitmq
 spec:
   ports:
-  - port: 9090
-    name: exporter
-    targetPort: exporter
-    protocol: TCP
+    - port: 9090
+      name: exporter
+      targetPort: exporter
+      protocol: TCP
   selector:
     app: rabbitmq
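The DigitalOcean, Hetzner and Linode example configs that follow all share one relabeling shape: keep only instances carrying a `monitoring` tag, then rewrite `__address__` to the node exporter port. The generic pattern, with a hypothetical `__meta_example_*` prefix standing in for the provider-specific meta labels:

```yaml
relabel_configs:
  # Keep only targets whose comma-joined tag list contains "monitoring".
  - source_labels: [__meta_example_tags] # hypothetical meta label
    regex: ".*,monitoring,.*"
    action: keep
  # Scrape the node exporter instead of the discovered address.
  - source_labels: [__meta_example_public_ip] # hypothetical meta label
    target_label: __address__
    replacement: "$1:9100"
```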
diff --git a/documentation/examples/prometheus-digitalocean.yml b/documentation/examples/prometheus-digitalocean.yml
index b1ed4ed73..2dd71ca07 100644
--- a/documentation/examples/prometheus-digitalocean.yml
+++ b/documentation/examples/prometheus-digitalocean.yml
@@ -2,14 +2,13 @@
 # DigitalOcean.
 
 scrape_configs:
-
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
 
   # Discover Node Exporter instances to scrape.
-  - job_name: 'node'
+  - job_name: "node"
     digitalocean_sd_configs:
       - authorization:
@@ -17,10 +16,10 @@ scrape_configs:
     relabel_configs:
       # Only scrape targets that have a tag 'monitoring'.
       - source_labels: [__meta_digitalocean_tags]
-        regex: '.*,monitoring,.*'
+        regex: ".*,monitoring,.*"
         action: keep
 
       # Use the public IPv6 address and port 9100 to scrape the target.
       - source_labels: [__meta_digitalocean_public_ipv6]
         target_label: __address__
-        replacement: '[$1]:9100'
+        replacement: "[$1]:9100"
diff --git a/documentation/examples/prometheus-docker.yml b/documentation/examples/prometheus-docker.yml
index 065b79635..35d1e7e96 100644
--- a/documentation/examples/prometheus-docker.yml
+++ b/documentation/examples/prometheus-docker.yml
@@ -1,20 +1,19 @@
 # A example scrape configuration for running Prometheus with Docker.
 
 scrape_configs:
-
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
 
   # Create a job for Docker daemon.
   #
   # This example requires Docker daemon to be configured to expose
   # Prometheus metrics, as documented here:
   # https://docs.docker.com/config/daemon/prometheus/
-  - job_name: 'docker'
+  - job_name: "docker"
     static_configs:
-      - targets: ['localhost:9323']
+      - targets: ["localhost:9323"]
 
   # Create a job for Docker Swarm containers.
   #
@@ -26,7 +25,7 @@ scrape_configs:
   #      --mount type=bind,src=/sys,dst=/sys,ro
   #      --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
   #      google/cadvisor -docker_only
-  - job_name: 'docker-containers'
+  - job_name: "docker-containers"
     docker_sd_configs:
       - host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
     relabel_configs:
diff --git a/documentation/examples/prometheus-dockerswarm.yml b/documentation/examples/prometheus-dockerswarm.yml
index ccbaf2630..2272504d9 100644
--- a/documentation/examples/prometheus-dockerswarm.yml
+++ b/documentation/examples/prometheus-dockerswarm.yml
@@ -2,18 +2,17 @@
 # Docker Swarm.
 
 scrape_configs:
-
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
 
   # Create a job for Docker daemons.
   #
   # This example requires Docker daemons to be configured to expose
   # Prometheus metrics, as documented here:
   # https://docs.docker.com/config/daemon/prometheus/
-  - job_name: 'docker'
+  - job_name: "docker"
     dockerswarm_sd_configs:
       - host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
         role: nodes
@@ -34,7 +33,7 @@ scrape_configs:
   #      --mount type=bind,src=/sys,dst=/sys,ro
   #      --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
   #      google/cadvisor -docker_only
-  - job_name: 'dockerswarm'
+  - job_name: "dockerswarm"
     dockerswarm_sd_configs:
       - host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
         role: tasks
@@ -51,4 +50,3 @@ scrape_configs:
       - regex: __meta_dockerswarm_service_label_prometheus_(.+)
         action: labelmap
         replacement: $1
-
diff --git a/documentation/examples/prometheus-hetzner.yml b/documentation/examples/prometheus-hetzner.yml
index 4632f6692..ccc1ffc28 100644
--- a/documentation/examples/prometheus-hetzner.yml
+++ b/documentation/examples/prometheus-hetzner.yml
@@ -2,14 +2,13 @@
 # Hetzner.
 
 scrape_configs:
-
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
 
   # Discover Node Exporter instances to scrape.
-  - job_name: 'node'
+  - job_name: "node"
     hetzner_sd_configs:
       - authorization:
@@ -19,10 +18,10 @@ scrape_configs:
       # Use the public IPv4 and port 9100 to scrape the target.
       - source_labels: [__meta_hetzner_public_ipv4]
         target_label: __address__
-        replacement: '$1:9100'
+        replacement: "$1:9100"
 
   # Discover Node Exporter instances to scrape using a Hetzner Cloud Network called mynet.
-  - job_name: 'node_private'
+  - job_name: "node_private"
     hetzner_sd_configs:
       - authorization:
@@ -32,10 +31,10 @@ scrape_configs:
       # Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target.
       - source_labels: [__meta_hetzner_hcloud_private_ipv4_mynet]
         target_label: __address__
-        replacement: '$1:9100'
+        replacement: "$1:9100"
 
   # Discover Node Exporter instances to scrape.
-  - job_name: 'node_robot'
+  - job_name: "node_robot"
     hetzner_sd_configs:
       - basic_auth:
@@ -46,4 +45,4 @@ scrape_configs:
       # Use the public IPv4 and port 9100 to scrape the target.
       - source_labels: [__meta_hetzner_public_ipv4]
         target_label: __address__
-        replacement: '$1:9100'
+        replacement: "$1:9100"
diff --git a/documentation/examples/prometheus-kubernetes.yml b/documentation/examples/prometheus-kubernetes.yml
index 9f5fe0528..e2de9cdc0 100644
--- a/documentation/examples/prometheus-kubernetes.yml
+++ b/documentation/examples/prometheus-kubernetes.yml
@@ -16,275 +16,285 @@
 # default named port `https`. This works for single API server deployments as
 # well as HA API server deployments.
 scrape_configs:
-- job_name: 'kubernetes-apiservers'
+  - job_name: "kubernetes-apiservers"
 
-  kubernetes_sd_configs:
-  - role: endpoints
+    kubernetes_sd_configs:
+      - role: endpoints
 
-  # Default to scraping over https. If required, just disable this or change to
-  # `http`.
-  scheme: https
+    # Default to scraping over https. If required, just disable this or change to
+    # `http`.
+    scheme: https
 
-  # This TLS & authorization config is used to connect to the actual scrape
-  # endpoints for cluster components. This is separate to discovery auth
-  # configuration because discovery & scraping are two separate concerns in
-  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-  # the cluster. Otherwise, more config options have to be provided within the
-  # <kubernetes_sd_config>.
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    # If your node certificates are self-signed or use a different CA to the
-    # master CA, then disable certificate verification below. Note that
-    # certificate verification is an integral part of a secure infrastructure
-    # so this should only be disabled in a controlled environment. You can
-    # disable certificate verification by uncommenting the line below.
-    #
-    # insecure_skip_verify: true
-  authorization:
-    credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    # This TLS & authorization config is used to connect to the actual scrape
+    # endpoints for cluster components. This is separate to discovery auth
+    # configuration because discovery & scraping are two separate concerns in
+    # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+    # the cluster. Otherwise, more config options have to be provided within the
+    # <kubernetes_sd_config>.
+    tls_config:
+      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      # If your node certificates are self-signed or use a different CA to the
+      # master CA, then disable certificate verification below. Note that
+      # certificate verification is an integral part of a secure infrastructure
+      # so this should only be disabled in a controlled environment. You can
+      # disable certificate verification by uncommenting the line below.
+      #
+      # insecure_skip_verify: true
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
 
-  # Keep only the default/kubernetes service endpoints for the https port. This
-  # will add targets for each API server which Kubernetes adds an endpoint to
-  # the default/kubernetes service.
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: default;kubernetes;https
+    # Keep only the default/kubernetes service endpoints for the https port. This
+    # will add targets for each API server which Kubernetes adds an endpoint to
+    # the default/kubernetes service.
+    relabel_configs:
+      - source_labels:
+          [
+            __meta_kubernetes_namespace,
+            __meta_kubernetes_service_name,
+            __meta_kubernetes_endpoint_port_name,
+          ]
+        action: keep
+        regex: default;kubernetes;https
 
-# Scrape config for nodes (kubelet).
-#
-# Rather than connecting directly to the node, the scrape is proxied though the
-# Kubernetes apiserver. This means it will work if Prometheus is running out of
-# cluster, or can't connect to nodes for some other reason (e.g. because of
-# firewalling).
-- job_name: 'kubernetes-nodes'
-
-  # Default to scraping over https. If required, just disable this or change to
-  # `http`.
-  scheme: https
-
-  # This TLS & authorization config is used to connect to the actual scrape
-  # endpoints for cluster components. This is separate to discovery auth
-  # configuration because discovery & scraping are two separate concerns in
-  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-  # the cluster. Otherwise, more config options have to be provided within the
-  # <kubernetes_sd_config>.
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    # If your node certificates are self-signed or use a different CA to the
-    # master CA, then disable certificate verification below. Note that
-    # certificate verification is an integral part of a secure infrastructure
-    # so this should only be disabled in a controlled environment. You can
-    # disable certificate verification by uncommenting the line below.
-    #
-    # insecure_skip_verify: true
-  authorization:
-    credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: node
-
-  relabel_configs:
-  - action: labelmap
-    regex: __meta_kubernetes_node_label_(.+)
-
-# Scrape config for Kubelet cAdvisor.
-#
-# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
-# (those whose names begin with 'container_') have been removed from the
-# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
-# retrieve those metrics.
-#
-# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
-# HTTP endpoint; use the "/metrics" endpoint on the 4194 port of nodes. In
-# that case (and ensure cAdvisor's HTTP server hasn't been disabled with the
-# --cadvisor-port=0 Kubelet flag).
-#
-# This job is not necessary and should be removed in Kubernetes 1.6 and
-# earlier versions, or it will cause the metrics to be scraped twice.
-- job_name: 'kubernetes-cadvisor'
-
-  # Default to scraping over https. If required, just disable this or change to
-  # `http`.
-  scheme: https
-
-  # Starting Kubernetes 1.7.3 the cAdvisor metrics are under /metrics/cadvisor.
-  # Kubernetes CIS Benchmark recommends against enabling the insecure HTTP
-  # servers of Kubernetes, therefore the cAdvisor metrics on the secure handler
-  # are used.
-  metrics_path: /metrics/cadvisor
-
-  # This TLS & authorization config is used to connect to the actual scrape
-  # endpoints for cluster components. This is separate to discovery auth
-  # configuration because discovery & scraping are two separate concerns in
-  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-  # the cluster. Otherwise, more config options have to be provided within the
-  # <kubernetes_sd_config>.
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    # If your node certificates are self-signed or use a different CA to the
-    # master CA, then disable certificate verification below. Note that
-    # certificate verification is an integral part of a secure infrastructure
-    # so this should only be disabled in a controlled environment. You can
-    # disable certificate verification by uncommenting the line below.
-    #
-    # insecure_skip_verify: true
-  authorization:
-    credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: node
-
-  relabel_configs:
-  - action: labelmap
-    regex: __meta_kubernetes_node_label_(.+)
-
-# Example scrape config for service endpoints.
-#
-# The relabeling allows the actual service scrape endpoint to be configured
-# for all or only some endpoints.
-- job_name: 'kubernetes-service-endpoints'
-
-  kubernetes_sd_configs:
-  - role: endpoints
-
-  relabel_configs:
-  # Example relabel to scrape only endpoints that have
-  # "example.io/should_be_scraped = true" annotation.
-  # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
-  #   action: keep
-  #   regex: true
+  # Scrape config for nodes (kubelet).
   #
-  # Example relabel to customize metric path based on endpoints
-  # "example.io/metric_path = <metric path>" annotation.
-  # - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
-  #   action: replace
-  #   target_label: __metrics_path__
-  #   regex: (.+)
+  # Rather than connecting directly to the node, the scrape is proxied though the
+  # Kubernetes apiserver. This means it will work if Prometheus is running out of
+  # cluster, or can't connect to nodes for some other reason (e.g. because of
+  # firewalling).
+  - job_name: "kubernetes-nodes"
+
+    # Default to scraping over https. If required, just disable this or change to
+    # `http`.
+    scheme: https
+
+    # This TLS & authorization config is used to connect to the actual scrape
+    # endpoints for cluster components. This is separate to discovery auth
+    # configuration because discovery & scraping are two separate concerns in
+    # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+    # the cluster. Otherwise, more config options have to be provided within the
+    # <kubernetes_sd_config>.
+    tls_config:
+      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      # If your node certificates are self-signed or use a different CA to the
+      # master CA, then disable certificate verification below. Note that
+      # certificate verification is an integral part of a secure infrastructure
+      # so this should only be disabled in a controlled environment. You can
+      # disable certificate verification by uncommenting the line below.
+      #
+      # insecure_skip_verify: true
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+    kubernetes_sd_configs:
+      - role: node
+
+    relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+
+  # Scrape config for Kubelet cAdvisor.
   #
-  # Example relabel to scrape only single, desired port for the service based
-  # on endpoints "example.io/scrape_port = <scrape port>" annotation.
-  # - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
-  #   action: replace
-  #   regex: ([^:]+)(?::\d+)?;(\d+)
-  #   replacement: $1:$2
-  #   target_label: __address__
+  # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
+  # (those whose names begin with 'container_') have been removed from the
+  # Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
+  # retrieve those metrics.
   #
-  # Example relabel to configure scrape scheme for all service scrape targets
-  # based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
-  # - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
-  #   action: replace
-  #   target_label: __scheme__
-  #   regex: (https?)
-  - action: labelmap
-    regex: __meta_kubernetes_service_label_(.+)
-  - source_labels: [__meta_kubernetes_namespace]
-    action: replace
-    target_label: kubernetes_namespace
-  - source_labels: [__meta_kubernetes_service_name]
-    action: replace
-    target_label: kubernetes_name
-
-# Example scrape config for probing services via the Blackbox Exporter.
-#
-# The relabeling allows the actual service scrape endpoint to be configured
-# for all or only some services.
-- job_name: 'kubernetes-services'
-
-  metrics_path: /probe
-  params:
-    module: [http_2xx]
-
-  kubernetes_sd_configs:
-  - role: service
-
-  relabel_configs:
-  # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation
-  # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed]
-  #   action: keep
-  #   regex: true
-  - source_labels: [__address__]
-    target_label: __param_target
-  - target_label: __address__
-    replacement: blackbox-exporter.example.com:9115
-  - source_labels: [__param_target]
-    target_label: instance
-  - action: labelmap
-    regex: __meta_kubernetes_service_label_(.+)
-  - source_labels: [__meta_kubernetes_namespace]
-    target_label: kubernetes_namespace
-  - source_labels: [__meta_kubernetes_service_name]
-    target_label: kubernetes_name
-
-# Example scrape config for probing ingresses via the Blackbox Exporter.
-#
-# The relabeling allows the actual ingress scrape endpoint to be configured
-# for all or only some services.
-- job_name: 'kubernetes-ingresses'
-
-  metrics_path: /probe
-  params:
-    module: [http_2xx]
-
-  kubernetes_sd_configs:
-  - role: ingress
-
-  relabel_configs:
-  # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation
-  # - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
-  #   action: keep
-  #   regex: true
-  - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
-    regex: (.+);(.+);(.+)
-    replacement: ${1}://${2}${3}
-    target_label: __param_target
-  - target_label: __address__
-    replacement: blackbox-exporter.example.com:9115
-  - source_labels: [__param_target]
-    target_label: instance
-  - action: labelmap
-    regex: __meta_kubernetes_ingress_label_(.+)
-  - source_labels: [__meta_kubernetes_namespace]
-    target_label: kubernetes_namespace
-  - source_labels: [__meta_kubernetes_ingress_name]
-    target_label: kubernetes_name
-
-# Example scrape config for pods
-#
-# The relabeling allows the actual pod scrape to be configured
-# for all the declared ports (or port-free target if none is declared)
-# or only some ports.
-- job_name: 'kubernetes-pods'
-
-  kubernetes_sd_configs:
-  - role: pod
-
-  relabel_configs:
-  # Example relabel to scrape only pods that have
-  # "example.io/should_be_scraped = true" annotation.
-  # - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped]
-  #   action: keep
-  #   regex: true
+  # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
+  # HTTP endpoint; use the "/metrics" endpoint on the 4194 port of nodes. In
+  # that case (and ensure cAdvisor's HTTP server hasn't been disabled with the
+  # --cadvisor-port=0 Kubelet flag).
   #
-  # Example relabel to customize metric path based on pod
-  # "example.io/metric_path = <metric path>" annotation.
-  # - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path]
-  #   action: replace
-  #   target_label: __metrics_path__
-  #   regex: (.+)
+  # This job is not necessary and should be removed in Kubernetes 1.6 and
+  # earlier versions, or it will cause the metrics to be scraped twice.
+  - job_name: "kubernetes-cadvisor"
+
+    # Default to scraping over https. If required, just disable this or change to
+    # `http`.
+    scheme: https
+
+    # Starting Kubernetes 1.7.3 the cAdvisor metrics are under /metrics/cadvisor.
+    # Kubernetes CIS Benchmark recommends against enabling the insecure HTTP
+    # servers of Kubernetes, therefore the cAdvisor metrics on the secure handler
+    # are used.
+    metrics_path: /metrics/cadvisor
+
+    # This TLS & authorization config is used to connect to the actual scrape
+    # endpoints for cluster components. This is separate to discovery auth
+    # configuration because discovery & scraping are two separate concerns in
+    # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+    # the cluster. Otherwise, more config options have to be provided within the
+    # <kubernetes_sd_config>.
+    tls_config:
+      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+      # If your node certificates are self-signed or use a different CA to the
+      # master CA, then disable certificate verification below. Note that
+      # certificate verification is an integral part of a secure infrastructure
+      # so this should only be disabled in a controlled environment. You can
+      # disable certificate verification by uncommenting the line below.
+      #
+      # insecure_skip_verify: true
+    authorization:
+      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+    kubernetes_sd_configs:
+      - role: node
+
+    relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+
+  # Example scrape config for service endpoints.
   #
-  # Example relabel to scrape only single, desired port for the pod
-  # based on pod "example.io/scrape_port = <scrape port>" annotation.
-  # - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
-  #   action: replace
-  #   regex: ([^:]+)(?::\d+)?;(\d+)
-  #   replacement: $1:$2
-  #   target_label: __address__
-  - action: labelmap
-    regex: __meta_kubernetes_pod_label_(.+)
-  - source_labels: [__meta_kubernetes_namespace]
-    action: replace
-    target_label: kubernetes_namespace
-  - source_labels: [__meta_kubernetes_pod_name]
-    action: replace
-    target_label: kubernetes_pod_name
+  # The relabeling allows the actual service scrape endpoint to be configured
+  # for all or only some endpoints.
+  - job_name: "kubernetes-service-endpoints"
+
+    kubernetes_sd_configs:
+      - role: endpoints
+
+    relabel_configs:
+      # Example relabel to scrape only endpoints that have
+      # "example.io/should_be_scraped = true" annotation.
+      # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
+      #   action: keep
+      #   regex: true
+      #
+      # Example relabel to customize metric path based on endpoints
+      # "example.io/metric_path = <metric path>" annotation.
+      # - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
+      #   action: replace
+      #   target_label: __metrics_path__
+      #   regex: (.+)
+      #
+      # Example relabel to scrape only single, desired port for the service based
+      # on endpoints "example.io/scrape_port = <scrape port>" annotation.
+      # - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
+      #   action: replace
+      #   regex: ([^:]+)(?::\d+)?;(\d+)
+      #   replacement: $1:$2
+      #   target_label: __address__
+      #
+      # Example relabel to configure scrape scheme for all service scrape targets
+      # based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
+      # - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
+      #   action: replace
+      #   target_label: __scheme__
+      #   regex: (https?)
+      - action: labelmap
+        regex: __meta_kubernetes_service_label_(.+)
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace
+        target_label: kubernetes_namespace
+      - source_labels: [__meta_kubernetes_service_name]
+        action: replace
+        target_label: kubernetes_name
+
+  # Example scrape config for probing services via the Blackbox Exporter.
+  #
+  # The relabeling allows the actual service scrape endpoint to be configured
+  # for all or only some services.
+ - job_name: "kubernetes-services" + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation + # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed] + # action: keep + # regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox-exporter.example.com:9115 + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for probing ingresses via the Blackbox Exporter. + # + # The relabeling allows the actual ingress scrape endpoint to be configured + # for all or only some services. + - job_name: "kubernetes-ingresses" + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: ingress + + relabel_configs: + # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation + # - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed] + # action: keep + # regex: true + - source_labels: + [ + __meta_kubernetes_ingress_scheme, + __address__, + __meta_kubernetes_ingress_path, + ] + regex: (.+);(.+);(.+) + replacement: ${1}://${2}${3} + target_label: __param_target + - target_label: __address__ + replacement: blackbox-exporter.example.com:9115 + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_ingress_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_ingress_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape to be configured + # for all the declared ports (or port-free target if none is declared) + # or only some ports. + - job_name: "kubernetes-pods" + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + # Example relabel to scrape only pods that have + # "example.io/should_be_scraped = true" annotation. + # - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped] + # action: keep + # regex: true + # + # Example relabel to customize metric path based on pod + # "example.io/metric_path = " annotation. + # - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path] + # action: replace + # target_label: __metrics_path__ + # regex: (.+) + # + # Example relabel to scrape only single, desired port for the pod + # based on pod "example.io/scrape_port = " annotation. 
+      # - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
+      #   action: replace
+      #   regex: ([^:]+)(?::\d+)?;(\d+)
+      #   replacement: $1:$2
+      #   target_label: __address__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - source_labels: [__meta_kubernetes_namespace]
+        action: replace
+        target_label: kubernetes_namespace
+      - source_labels: [__meta_kubernetes_pod_name]
+        action: replace
+        target_label: kubernetes_pod_name
diff --git a/documentation/examples/prometheus-linode.yml b/documentation/examples/prometheus-linode.yml
index 8539531ae..315675071 100644
--- a/documentation/examples/prometheus-linode.yml
+++ b/documentation/examples/prometheus-linode.yml
@@ -3,22 +3,22 @@
 scrape_configs:
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
 
   # Discover Node Exporter instances to scrape.
-  - job_name: 'node'
+  - job_name: "node"
     linode_sd_configs:
       - authorization:
           credentials: "<replace with your Personal Access Token>"
     relabel_configs:
       # Only scrape targets that have a tag 'monitoring'.
       - source_labels: [__meta_linode_tags]
-        regex: '.*,monitoring,.*'
+        regex: ".*,monitoring,.*"
         action: keep
 
       # Use the public IPv6 address and port 9100 to scrape the target.
       - source_labels: [__meta_linode_public_ipv6]
         target_label: __address__
-        replacement: '[$1]:9100'
+        replacement: "[$1]:9100"
diff --git a/documentation/examples/prometheus-marathon.yml b/documentation/examples/prometheus-marathon.yml
index 417a05358..96b5f8b7f 100644
--- a/documentation/examples/prometheus-marathon.yml
+++ b/documentation/examples/prometheus-marathon.yml
@@ -2,23 +2,21 @@
 # (or DC/OS) cluster.
 
 scrape_configs:
-
   # Make Prometheus scrape itself for metrics.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
 
   # Discover Marathon services to scrape.
-  - job_name: 'marathon'
+  - job_name: "marathon"
 
     # Scrape Marathon itself to discover new services every minute.
     marathon_sd_configs:
       - servers:
-        - http://marathon.mesos:8080
+          - http://marathon.mesos:8080
        refresh_interval: 60s
 
     relabel_configs:
-
       # Only scrape targets that have a port label called 'metrics' specified on a port
@@ -45,7 +43,11 @@ scrape_configs:
       #     ]
 
       # Match a slash-prefixed string either in a portMapping or a portDefinition label.
-      - source_labels: [__meta_marathon_port_mapping_label_metrics,__meta_marathon_port_definition_label_metrics]
+      - source_labels:
+          [
+            __meta_marathon_port_mapping_label_metrics,
+            __meta_marathon_port_definition_label_metrics,
+          ]
         regex: (\/.+;|;\/.+)
         action: keep
 
@@ -53,7 +55,7 @@ scrape_configs:
       # If a portMapping 'metrics' label is set, use the label value as the URI to scrape.
       - source_labels: [__meta_marathon_port_mapping_label_metrics]
         regex: (\/.+)
         target_label: __metrics_path__
- 
+
       # If a portDefinition 'metrics' label is set, use the label value as the URI to scrape.
       - source_labels: [__meta_marathon_port_definition_label_metrics]
         regex: (\/.+)
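The blackbox probing jobs in the Kubernetes example above all rely on the same three-step relabel chain; stripped of the discovery detail, it is:

```yaml
relabel_configs:
  # 1. The discovered address becomes the ?target= probe parameter.
  - source_labels: [__address__]
    target_label: __param_target
  # 2. The scrape itself is redirected to the blackbox exporter.
  - target_label: __address__
    replacement: blackbox-exporter.example.com:9115
  # 3. Keep the probed target visible as the instance label.
  - source_labels: [__param_target]
    target_label: instance
```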
diff --git a/documentation/examples/prometheus.yml b/documentation/examples/prometheus.yml
index af33d8704..312b578aa 100644
--- a/documentation/examples/prometheus.yml
+++ b/documentation/examples/prometheus.yml
@@ -1,15 +1,15 @@
 # my global config
 global:
-  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
   evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
   # scrape_timeout is set to the global default (10s).
 
 # Alertmanager configuration
 alerting:
   alertmanagers:
-  - static_configs:
-    - targets:
-      # - alertmanager:9093
+    - static_configs:
+        - targets:
+          # - alertmanager:9093
 
 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
 rule_files:
@@ -20,10 +20,10 @@ rule_files:
 # Here it's Prometheus itself.
 scrape_configs:
   # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
+  - job_name: "prometheus"
 
     # metrics_path defaults to '/metrics'
     # scheme defaults to 'http'.
 
     static_configs:
-      - targets: ['localhost:9090']
+      - targets: ["localhost:9090"]
diff --git a/documentation/examples/rbac-setup.yml b/documentation/examples/rbac-setup.yml
index e836f4526..350d3eda2 100644
--- a/documentation/examples/rbac-setup.yml
+++ b/documentation/examples/rbac-setup.yml
@@ -10,22 +10,22 @@ kind: ClusterRole
 metadata:
   name: prometheus
 rules:
-- apiGroups: [""]
-  resources:
-  - nodes
-  - nodes/metrics
-  - services
-  - endpoints
-  - pods
-  verbs: ["get", "list", "watch"]
-- apiGroups:
-  - extensions
-  - networking.k8s.io
-  resources:
-  - ingresses
-  verbs: ["get", "list", "watch"]
-- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
-  verbs: ["get"]
+  - apiGroups: [""]
+    resources:
+      - nodes
+      - nodes/metrics
+      - services
+      - endpoints
+      - pods
+    verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - extensions
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs: ["get", "list", "watch"]
+  - nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
+    verbs: ["get"]
 ---
 apiVersion: v1
 kind: ServiceAccount
@@ -42,6 +42,6 @@ roleRef:
   kind: ClusterRole
   name: prometheus
 subjects:
-- kind: ServiceAccount
-  name: prometheus
-  namespace: default
+  - kind: ServiceAccount
+    name: prometheus
+    namespace: default
diff --git a/pkg/rulefmt/testdata/bad_annotation.bad.yaml b/pkg/rulefmt/testdata/bad_annotation.bad.yaml
index b59c41a63..a747827a9 100644
--- a/pkg/rulefmt/testdata/bad_annotation.bad.yaml
+++ b/pkg/rulefmt/testdata/bad_annotation.bad.yaml
@@ -1,7 +1,7 @@
 groups:
   - name: yolo
     rules:
-    - alert: hola
-      expr: 1
-      annotations:
-        ins-tance: localhost
+      - alert: hola
+        expr: 1
+        annotations:
+          ins-tance: localhost
diff --git a/pkg/rulefmt/testdata/bad_expr.bad.yaml b/pkg/rulefmt/testdata/bad_expr.bad.yaml
index f9a029ccf..9df14e85e 100644
--- a/pkg/rulefmt/testdata/bad_expr.bad.yaml
+++ b/pkg/rulefmt/testdata/bad_expr.bad.yaml
@@ -1,5 +1,5 @@
 groups:
-- name: yolo
-  rules:
-  - record: yolo
-    expr: rate(hi)
+  - name: yolo
+    rules:
+      - record: yolo
+        expr: rate(hi)
diff --git a/pkg/rulefmt/testdata/bad_field.bad.yaml b/pkg/rulefmt/testdata/bad_field.bad.yaml
index b93f71c12..d85eab1e5 100644
--- a/pkg/rulefmt/testdata/bad_field.bad.yaml
+++ b/pkg/rulefmt/testdata/bad_field.bad.yaml
@@ -7,4 +7,3 @@ groups:
         instance: localhost
       annotation:
         summary: annonations is written without s above
-
diff --git a/pkg/rulefmt/testdata/duplicate_grp.bad.yaml b/pkg/rulefmt/testdata/duplicate_grp.bad.yaml
index 97d453429..f4fa16238 100644
--- a/pkg/rulefmt/testdata/duplicate_grp.bad.yaml
+++ b/pkg/rulefmt/testdata/duplicate_grp.bad.yaml
@@ -1,3 +1,3 @@
 groups:
-- name: yolo
-- name: yolo
+  - name: yolo
+  - name: yolo
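Each rulefmt `.bad.yaml` fixture here and below encodes exactly one violation: a hyphenated annotation name, an unparsable expression, a misspelled `annotations` key, duplicate group names, the reserved `__name__` label, a `record` that is not a bare metric name, a rule with no `expr`, and `record` combined with `alert`. For contrast, a minimal file that passes validation, mirroring the rules fixtures at the end of this diff:

```yaml
groups:
  - name: example
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job)(rate(http_requests_total[5m]))
```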
diff --git a/pkg/rulefmt/testdata/invalid_label_name.bad.yaml b/pkg/rulefmt/testdata/invalid_label_name.bad.yaml
index 72f69faa9..3b1de358f 100644
--- a/pkg/rulefmt/testdata/invalid_label_name.bad.yaml
+++ b/pkg/rulefmt/testdata/invalid_label_name.bad.yaml
@@ -1,7 +1,7 @@
 groups:
-- name: yolo
-  rules:
-  - record: hola
-    expr: 1
-    labels:
-      __name__: anything
+  - name: yolo
+    rules:
+      - record: hola
+        expr: 1
+        labels:
+          __name__: anything
diff --git a/pkg/rulefmt/testdata/invalid_record_name.bad.yaml b/pkg/rulefmt/testdata/invalid_record_name.bad.yaml
index bda5f4970..1f1da07ea 100644
--- a/pkg/rulefmt/testdata/invalid_record_name.bad.yaml
+++ b/pkg/rulefmt/testdata/invalid_record_name.bad.yaml
@@ -1,5 +1,5 @@
 groups:
   - name: yolo
     rules:
-    - record: strawberry{flavor="sweet"}
-      expr: 1
\ No newline at end of file
+      - record: strawberry{flavor="sweet"}
+        expr: 1
diff --git a/pkg/rulefmt/testdata/noexpr.bad.yaml b/pkg/rulefmt/testdata/noexpr.bad.yaml
index ad0c29e4c..9865e48d0 100644
--- a/pkg/rulefmt/testdata/noexpr.bad.yaml
+++ b/pkg/rulefmt/testdata/noexpr.bad.yaml
@@ -1,4 +1,4 @@
 groups:
   - name: yolo
     rules:
-    - record: ylo
+      - record: ylo
diff --git a/pkg/rulefmt/testdata/record_and_alert.bad.yaml b/pkg/rulefmt/testdata/record_and_alert.bad.yaml
index 0ba81b742..3cd3dce37 100644
--- a/pkg/rulefmt/testdata/record_and_alert.bad.yaml
+++ b/pkg/rulefmt/testdata/record_and_alert.bad.yaml
@@ -1,6 +1,6 @@
 groups:
-- name: yolo
-  rules:
-  - record: Hi
-    alert: Hello
-    expr: 1
+  - name: yolo
+    rules:
+      - record: Hi
+        alert: Hello
+        expr: 1
diff --git a/pkg/rulefmt/testdata/test.yaml b/pkg/rulefmt/testdata/test.yaml
index a3127426d..6810b2cbd 100644
--- a/pkg/rulefmt/testdata/test.yaml
+++ b/pkg/rulefmt/testdata/test.yaml
@@ -1,64 +1,64 @@
 groups:
-- name: my-group-name
-  interval: 30s # defaults to global interval
-  rules:
-  - alert: HighErrors
-    expr: |
-      sum without(instance) (rate(errors_total[5m]))
-      /
-      sum without(instance) (rate(requests_total[5m]))
-    for: 5m
-    labels:
-      severity: critical
-    annotations:
-      description: "stuff's happening with {{ $.labels.service }}"
+  - name: my-group-name
+    interval: 30s # defaults to global interval
+    rules:
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          description: "stuff's happening with {{ $.labels.service }}"
 
-  # Mix recording rules in the same list
-  - record: "new_metric"
-    expr: |
-      sum without(instance) (rate(errors_total[5m]))
-      /
-      sum without(instance) (rate(requests_total[5m]))
-    labels:
-      abc: edf
-      uvw: xyz
+      # Mix recording rules in the same list
+      - record: "new_metric"
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        labels:
+          abc: edf
+          uvw: xyz
 
-  - alert: HighErrors
-    expr: |
-      sum without(instance) (rate(errors_total[5m]))
-      /
-      sum without(instance) (rate(requests_total[5m]))
-    for: 5m
-    labels:
-      severity: critical
-    annotations:
-      description: "stuff's happening with {{ $.labels.service }}"
-
-- name: my-another-name
-  interval: 30s # defaults to global interval
-  rules:
-  - alert: HighErrors
-    expr: |
-      sum without(instance) (rate(errors_total[5m]))
-      /
-      sum without(instance) (rate(requests_total[5m]))
-    for: 5m
-    labels:
-      severity: critical
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          description: "stuff's happening with {{ $.labels.service }}"
-  - record: "new_metric"
-    expr: |
-      sum without(instance) (rate(errors_total[5m]))
-      /
-      sum without(instance) (rate(requests_total[5m]))
+  - name: my-another-name
+    interval: 30s # defaults to global interval
+    rules:
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
 
-  - alert: HighErrors
-    expr: |
-      sum without(instance) (rate(errors_total[5m]))
-      /
-      sum without(instance) (rate(requests_total[5m]))
-    for: 5m
-    labels:
-      severity: critical
-    annotations:
-      description: "stuff's happening with {{ $.labels.service }}"
+      - record: "new_metric"
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          description: "stuff's happening with {{ $.labels.service }}"
diff --git a/rules/fixtures/rules.yaml b/rules/fixtures/rules.yaml
index bd4cb228a..9e21c1ea2 100644
--- a/rules/fixtures/rules.yaml
+++ b/rules/fixtures/rules.yaml
@@ -1,5 +1,5 @@
 groups:
   - name: test
     rules:
-    - record: job:http_requests:rate5m
-      expr: sum by (job)(rate(http_requests_total[5m]))
+      - record: job:http_requests:rate5m
+        expr: sum by (job)(rate(http_requests_total[5m]))
diff --git a/rules/fixtures/rules2.yaml b/rules/fixtures/rules2.yaml
index e405138f8..822d5ebc2 100644
--- a/rules/fixtures/rules2.yaml
+++ b/rules/fixtures/rules2.yaml
@@ -1,5 +1,5 @@
 groups:
   - name: test_2
     rules:
-    - record: test_2
-      expr: vector(2)
+      - record: test_2
+        expr: vector(2)
diff --git a/rules/fixtures/rules2_copy.yaml b/rules/fixtures/rules2_copy.yaml
index dd74b6511..4f1e95796 100644
--- a/rules/fixtures/rules2_copy.yaml
+++ b/rules/fixtures/rules2_copy.yaml
@@ -1,5 +1,5 @@
 groups:
   - name: test_2 copy
     rules:
-    - record: test_2
-      expr: vector(2)
+      - record: test_2
+        expr: vector(2)