Merge pull request #8802 from mwasilew2/yaml-linting

Adds yamllinting to Makefile.common
Ben Kochie 2021-06-24 15:59:35 +02:00 committed by GitHub
commit 7cb55d5732
105 changed files with 1386 additions and 1365 deletions


@ -28,6 +28,8 @@ jobs:
keys:
- v3-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
- v3-npm-deps-
- run:
command: sudo apt-get install -y yamllint
- run:
command: make
environment:
@ -67,8 +69,7 @@ jobs:
command: |
choco upgrade -y golang
- run:
command:
refreshenv
command: refreshenv
- run:
command: |
$env:GOARCH=""; $env:GOOS=""; cd web/ui; go generate
@ -99,6 +100,8 @@ jobs:
- run:
command: jb install
working_directory: ~/project/documentation/prometheus-mixin
- run:
command: sudo apt-get install -y yamllint
- run:
command: make
working_directory: ~/project/documentation/prometheus-mixin
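
For contributors who want to reproduce the new CI step locally before pushing, the setup mirrors the two jobs above. A minimal sketch, assuming a Debian/Ubuntu machine (installing yamllint any other way, e.g. via pip, works just as well):

# mirror the CI step added above, then confirm the tool is available
sudo apt-get install -y yamllint
yamllint --version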


@ -13,12 +13,12 @@ name: "CodeQL"
on:
push:
branches: [ main, release-* ]
branches: [main, release-*]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
branches: [main]
schedule:
- cron: '26 14 * * 1'
- cron: "26 14 * * 1"
jobs:
analyze:
@ -28,7 +28,7 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript' ]
language: ["go", "javascript"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed


@ -32,13 +32,13 @@ jobs:
- name: Prepare nodepool
uses: docker://prominfra/funcbench:master
with:
entrypoint: 'docker_entrypoint'
entrypoint: "docker_entrypoint"
args: make deploy
- name: Delete all resources
if: always()
uses: docker://prominfra/funcbench:master
with:
entrypoint: 'docker_entrypoint'
entrypoint: "docker_entrypoint"
args: make clean
- name: Update status to failure
if: failure()


@ -2,9 +2,9 @@ name: CIFuzz
on:
pull_request:
paths:
- 'go.sum'
- 'go.mod'
- '**.go'
- "go.sum"
- "go.mod"
- "**.go"
jobs:
Fuzzing:
runs-on: ubuntu-latest
@ -13,12 +13,12 @@ jobs:
id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
with:
oss-fuzz-project-name: 'prometheus'
oss-fuzz-project-name: "prometheus"
dry-run: false
- name: Run Fuzzers
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
with:
oss-fuzz-project-name: 'prometheus'
oss-fuzz-project-name: "prometheus"
fuzz-seconds: 600
dry-run: false
- name: Upload Crash


@ -1,6 +1,6 @@
on:
repository_dispatch:
types: [prombench_start,prombench_restart,prombench_stop]
types: [prombench_start, prombench_restart, prombench_stop]
name: Prombench Workflow
env:
AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}


@ -1,3 +1,4 @@
---
tasks:
- init:
make build

.yamllint (new file, 26 lines)

@ -0,0 +1,26 @@
---
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
commas: disable
comments: disable
comments-indentation: disable
document-start: disable
indentation:
spaces: consistent
key-duplicates:
ignore: |
config/testdata/section_key_dup.bad.yml
line-length: disable
truthy:
ignore: |
.github/workflows/codeql-analysis.yml
.github/workflows/funcbench.yml
.github/workflows/fuzzing.yml
.github/workflows/prombench.yml
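
The new .yamllint file extends yamllint's default ruleset but relaxes the noisier checks: commas, comments, document-start and line-length are disabled, up to one space is tolerated inside braces and brackets, and the truthy rule skips the GitHub workflow files, whose top-level "on:" key would otherwise be flagged as a YAML 1.1 boolean (key-duplicates likewise ignores a testdata file that deliberately contains a duplicate key). As a rough sketch of the kind of report a run against this config produces, with the path and positions made up purely for illustration:

$ yamllint .
some/testdata/example.yml
  3:12      error    too many spaces inside brackets  (brackets)
  7:1       error    wrong indentation: expected 2 but found 4  (indentation)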


@ -26,7 +26,7 @@ TSDB_BENCHMARK_NUM_METRICS ?= 1000
TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
GOLANGCI_LINT_OPTS ?= --timeout 2m
GOLANGCI_LINT_OPTS ?= --timeout 4m
include Makefile.common


@ -118,7 +118,7 @@ endif
%: common-% ;
.PHONY: common-all
common-all: precheck style check_license lint unused build test
common-all: precheck style check_license lint yamllint unused build test
.PHONY: common-style
common-style:
@ -198,6 +198,11 @@ else
endif
endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
yamllint .
# For backward-compatibility.
.PHONY: common-staticcheck
common-staticcheck: lint
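
Because of the "%: common-% ;" pattern rule near the top of this diff, the new common-yamllint target can be run under its full name or via the shorthand; a usage sketch with output abridged:

$ make common-yamllint
>> running yamllint on all YAML files in the repository
yamllint .
$ make yamllint    # same thing, dispatched through the pattern rule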


@ -8,13 +8,13 @@ tests:
- interval: 1m
input_series:
- series: test_full
values: '0 0'
values: "0 0"
- series: test_stale
values: '0 stale'
values: "0 stale"
- series: test_missing
values: '0 _ _ _ _ _ _ 0'
values: "0 _ _ _ _ _ _ 0"
promql_expr_test:
# Ensure the sample is evaluated at the time we expect it to be.
@ -36,7 +36,7 @@ tests:
eval_time: 59s
exp_samples:
- value: 0
labels: 'test_stale'
labels: "test_stale"
- expr: test_stale
eval_time: 1m
exp_samples: []
@ -120,7 +120,7 @@ tests:
- series: 'test{job="test", instance="x:0"}'
# 2 minutes + 1 second of input data, recording rules should only run
# once a minute.
values: '0+1x120'
values: "0+1x120"
promql_expr_test:
- expr: job:test:count_over_time1m


@ -1,4 +1,4 @@
scrape_configs:
- azure_sd_configs:
- azure_sd_configs:
- authentication_method: invalid
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11


@ -3,4 +3,3 @@ scrape_configs:
bearer_token: 1234
bearer_token_file: somefile


@ -5,4 +5,3 @@ scrape_configs:
basic_auth:
username: user
password: password


@ -9,8 +9,8 @@ global:
foo: bar
rule_files:
- "first.rules"
- "my/*.rules"
- "first.rules"
- "my/*.rules"
remote_write:
- url: http://remote1/push
@ -46,7 +46,7 @@ remote_read:
key_file: valid_key_file
scrape_configs:
- job_name: prometheus
- job_name: prometheus
honor_labels: true
# scrape_interval is defined by the configured global (15s).
@ -65,7 +65,7 @@ scrape_configs:
- bar/*.yaml
static_configs:
- targets: ['localhost:9090', 'localhost:9191']
- targets: ["localhost:9090", "localhost:9191"]
labels:
my: label
your: label
@ -87,8 +87,7 @@ scrape_configs:
authorization:
credentials_file: valid_token_file
- job_name: service-x
- job_name: service-x
basic_auth:
username: admin_name
@ -135,12 +134,12 @@ scrape_configs:
regex: expensive_metric.*
action: drop
- job_name: service-y
- job_name: service-y
consul_sd_configs:
- server: 'localhost:1234'
- server: "localhost:1234"
token: mysecret
services: ['nginx', 'cache', 'mysql']
services: ["nginx", "cache", "mysql"]
tags: ["canary", "v1"]
node_meta:
rack: "123"
@ -154,12 +153,12 @@ scrape_configs:
relabel_configs:
- source_labels: [__meta_sd_consul_tags]
separator: ','
separator: ","
regex: label:([^=]+)=([^,]+)
target_label: ${1}
replacement: ${2}
- job_name: service-z
- job_name: service-z
tls_config:
cert_file: valid_cert_file
@ -168,44 +167,43 @@ scrape_configs:
authorization:
credentials: mysecret
- job_name: service-kubernetes
- job_name: service-kubernetes
kubernetes_sd_configs:
- role: endpoints
api_server: 'https://localhost:1234'
api_server: "https://localhost:1234"
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
basic_auth:
username: 'myusername'
password: 'mysecret'
username: "myusername"
password: "mysecret"
- job_name: service-kubernetes-namespaces
- job_name: service-kubernetes-namespaces
kubernetes_sd_configs:
- role: endpoints
api_server: 'https://localhost:1234'
api_server: "https://localhost:1234"
namespaces:
names:
- default
basic_auth:
username: 'myusername'
username: "myusername"
password_file: valid_password_file
- job_name: service-marathon
- job_name: service-marathon
marathon_sd_configs:
- servers:
- 'https://marathon.example.com:443'
- "https://marathon.example.com:443"
auth_token: "mysecret"
tls_config:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: service-ec2
- job_name: service-ec2
ec2_sd_configs:
- region: us-east-1
access_key: access
@ -221,14 +219,14 @@ scrape_configs:
- web
- db
- job_name: service-lightsail
- job_name: service-lightsail
lightsail_sd_configs:
- region: us-east-1
access_key: access
secret_key: mysecret
profile: profile
- job_name: service-azure
- job_name: service-azure
azure_sd_configs:
- environment: AzurePublicCloud
authentication_method: OAuth
@ -238,41 +236,41 @@ scrape_configs:
client_secret: mysecret
port: 9100
- job_name: service-nerve
- job_name: service-nerve
nerve_sd_configs:
- servers:
- localhost
paths:
- /monitoring
- job_name: 0123service-xxx
- job_name: 0123service-xxx
metrics_path: /metrics
static_configs:
- targets:
- localhost:9090
- job_name: badfederation
- job_name: badfederation
honor_timestamps: false
metrics_path: /federate
static_configs:
- targets:
- localhost:9090
- job_name: 測試
- job_name: 測試
metrics_path: /metrics
static_configs:
- targets:
- localhost:9090
- job_name: httpsd
- job_name: httpsd
http_sd_configs:
- url: 'http://example.com/prometheus'
- url: "http://example.com/prometheus"
- job_name: service-triton
- job_name: service-triton
triton_sd_configs:
- account: 'testAccount'
dns_suffix: 'triton.example.com'
endpoint: 'triton.example.com'
- account: "testAccount"
dns_suffix: "triton.example.com"
endpoint: "triton.example.com"
port: 9163
refresh_interval: 1m
version: 1
@ -280,21 +278,21 @@ scrape_configs:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: digitalocean-droplets
- job_name: digitalocean-droplets
digitalocean_sd_configs:
- authorization:
credentials: abcdef
- job_name: docker
- job_name: docker
docker_sd_configs:
- host: unix:///var/run/docker.sock
- job_name: dockerswarm
- job_name: dockerswarm
dockerswarm_sd_configs:
- host: http://127.0.0.1:2375
role: nodes
- job_name: service-openstack
- job_name: service-openstack
openstack_sd_configs:
- role: instance
region: RegionOne
@ -305,7 +303,7 @@ scrape_configs:
cert_file: valid_cert_file
key_file: valid_key_file
- job_name: hetzner
- job_name: hetzner
hetzner_sd_configs:
- role: hcloud
authorization:
@ -315,11 +313,11 @@ scrape_configs:
username: abcdef
password: abcdef
- job_name: service-eureka
- job_name: service-eureka
eureka_sd_configs:
- server: 'http://eureka.example.com:8761/eureka'
- server: "http://eureka.example.com:8761/eureka"
- job_name: scaleway
- job_name: scaleway
scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
@ -330,7 +328,7 @@ scrape_configs:
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key: 11111111-1111-1111-1111-111111111111
- job_name: linode-instances
- job_name: linode-instances
linode_sd_configs:
- authorization:
credentials: abcdef


@ -2,8 +2,7 @@ scrape_configs:
- job_name: prometheus
ec2_sd_configs:
- region: 'us-east-1'
- region: "us-east-1"
filters:
- name: 'tag:environment'
- name: "tag:environment"
values:


@ -1,4 +1,4 @@
scrape_configs:
- job_name: "test"
- job_name: "test"
metric_relabel_configs:
-


@ -1,2 +1,2 @@
remote_read:
-
-


@ -1,2 +1,2 @@
remote_write:
-
-


@ -1,2 +1,2 @@
scrape_configs:
-
-


@ -1,4 +1,4 @@
scrape_configs:
- job_name: "test"
- job_name: "test"
static_configs:
-


@ -1,4 +1,4 @@
scrape_configs:
- job_name: "test"
- job_name: "test"
relabel_configs:
-


@ -1,5 +1,4 @@
scrape_configs:
- job_name: eureka
- job_name: eureka
eureka_sd_configs:
- server: eureka.com


@ -1,5 +1,4 @@
scrape_configs:
- job_name: eureka
- job_name: eureka
eureka_sd_configs:
- server:


@ -1,4 +1,3 @@
scrape_configs:
- hetzner_sd_configs:
- hetzner_sd_configs:
- role: invalid


@ -1,3 +1,3 @@
scrape_configs:
- http_sd_configs:
- http_sd_configs:
- url: ftp://example.com


@ -1,3 +1,3 @@
scrape_configs:
- http_sd_configs:
- http_sd_configs:
- url: http://


@ -1,3 +1,3 @@
scrape_configs:
- http_sd_configs:
- http_sd_configs:
- url: invalid


@ -3,11 +3,10 @@ scrape_configs:
kubernetes_sd_configs:
- role: pod
api_server: 'https://localhost:1234'
api_server: "https://localhost:1234"
authorization:
credentials: 1234
basic_auth:
username: user
password: password


@ -3,8 +3,7 @@ scrape_configs:
kubernetes_sd_configs:
- role: node
api_server: 'https://localhost:1234'
api_server: "https://localhost:1234"
bearer_token: 1234
bearer_token_file: somefile


@ -3,10 +3,9 @@ scrape_configs:
kubernetes_sd_configs:
- role: pod
api_server: 'https://localhost:1234'
api_server: "https://localhost:1234"
bearer_token: 1234
basic_auth:
username: user
password: password


@ -1,4 +1,4 @@
scrape_configs:
- job_name: prometheus
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints


@ -1,5 +1,5 @@
scrape_configs:
- kubernetes_sd_configs:
- kubernetes_sd_configs:
- api_server: kubernetes:443
role: endpoints
namespaces:


@ -1,5 +1,4 @@
scrape_configs:
- kubernetes_sd_configs:
- kubernetes_sd_configs:
- api_server: kubernetes:443
role: vacation


@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:


@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
- job_name: prometheus
kubernetes_sd_configs:
- role: endpoints
selectors:


@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- separator: ','
- separator: ","
action: labeldrop


@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
relabel_configs:
- separator: ','
- separator: ","
action: labelkeep


@ -3,7 +3,7 @@ scrape_configs:
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- "https://localhost:1234"
auth_token: 1234
authorization:


@ -3,7 +3,7 @@ scrape_configs:
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- "https://localhost:1234"
auth_token: 1234
auth_token_file: somefile


@ -3,7 +3,7 @@ scrape_configs:
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- "https://localhost:1234"
auth_token: 1234
basic_auth:


@ -3,7 +3,7 @@ scrape_configs:
marathon_sd_configs:
- servers:
- 'https://localhost:1234'
- "https://localhost:1234"
auth_token: 1234
bearer_token: 4567


@ -4,7 +4,6 @@ global:
evaluation_interval: 30s
scrape_configs:
- job_name: service-marathon
- job_name: service-marathon
marathon_sd_configs:
- servers:


@ -1,4 +1,3 @@
scrape_configs:
- openstack_sd_configs:
- openstack_sd_configs:
- availability: invalid


@ -1,4 +1,3 @@
scrape_configs:
- openstack_sd_configs:
- openstack_sd_configs:
- role: invalid


@ -3,4 +3,3 @@ remote_write:
name: queue1
- url: localhost:9091
name: queue1


@ -17,8 +17,7 @@ alerting:
- 1.2.3.6:9093
scrape_configs:
- job_name: foo
- job_name: foo
static_configs:
- targets:
- localhost:9090
@ -27,7 +26,7 @@ scrape_configs:
my: label
your: label
- job_name: bar
- job_name: bar
azure_sd_configs:
- environment: AzurePublicCloud


@ -1,5 +1,5 @@
scrape_configs:
- scaleway_sd_configs:
- scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX


@ -1,7 +1,6 @@
scrape_configs:
- scaleway_sd_configs:
- scaleway_sd_configs:
- role: invalid
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key_file: bar


@ -1,8 +1,7 @@
scrape_configs:
- scaleway_sd_configs:
- scaleway_sd_configs:
- role: instance
project_id: 11111111-1111-1111-1111-111111111112
access_key: SCWXXXXXXXXXXXXXXXXX
secret_key_file: bar
secret_key: 11111111-1111-1111-1111-111111111112


@ -1,3 +1,3 @@
scrape_configs:
- job_name: prometheus
- job_name: prometheus
body_size_limit: 100


@ -1,4 +1,4 @@
scrape_configs:
- job_name: prometheus
- job_name: prometheus
scrape_interval: 5s
scrape_timeout: 6s


@ -3,7 +3,7 @@ global:
scrape_timeout: 15s
scrape_configs:
- job_name: prometheus
- job_name: prometheus
scrape_interval: 5s


@ -17,4 +17,4 @@ scrape_configs:
- job_name: prometheus
consult_sd_configs:
- server: 'localhost:1234'
- server: "localhost:1234"


@ -1,5 +1,5 @@
scrape_configs:
- job_name: prometheus
- job_name: prometheus
static_configs:
- targets:
- http://bad


@ -1,7 +1,6 @@
# the YAML structure is identical to valid.yml but the raw data is different.
- targets: ['localhost:9090', 'example.org:443']
- targets: ["localhost:9090", "example.org:443"]
labels:
foo: bar
- targets: ['my.domain']
- targets: ["my.domain"]


@ -2,14 +2,13 @@
# DigitalOcean.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Node Exporter instances to scrape.
- job_name: 'node'
- job_name: "node"
digitalocean_sd_configs:
- authorization:
@ -17,10 +16,10 @@ scrape_configs:
relabel_configs:
# Only scrape targets that have a tag 'monitoring'.
- source_labels: [__meta_digitalocean_tags]
regex: '.*,monitoring,.*'
regex: ".*,monitoring,.*"
action: keep
# Use the public IPv6 address and port 9100 to scrape the target.
- source_labels: [__meta_digitalocean_public_ipv6]
target_label: __address__
replacement: '[$1]:9100'
replacement: "[$1]:9100"


@ -1,20 +1,19 @@
# A example scrape configuration for running Prometheus with Docker.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Create a job for Docker daemon.
#
# This example requires Docker daemon to be configured to expose
# Prometheus metrics, as documented here:
# https://docs.docker.com/config/daemon/prometheus/
- job_name: 'docker'
- job_name: "docker"
static_configs:
- targets: ['localhost:9323']
- targets: ["localhost:9323"]
# Create a job for Docker Swarm containers.
#
@ -26,7 +25,7 @@ scrape_configs:
# --mount type=bind,src=/sys,dst=/sys,ro
# --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
# google/cadvisor -docker_only
- job_name: 'docker-containers'
- job_name: "docker-containers"
docker_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
relabel_configs:


@ -2,18 +2,17 @@
# Docker Swarm.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Create a job for Docker daemons.
#
# This example requires Docker daemons to be configured to expose
# Prometheus metrics, as documented here:
# https://docs.docker.com/config/daemon/prometheus/
- job_name: 'docker'
- job_name: "docker"
dockerswarm_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
role: nodes
@ -34,7 +33,7 @@ scrape_configs:
# --mount type=bind,src=/sys,dst=/sys,ro
# --mount type=bind,src=/var/lib/docker,dst=/var/lib/docker,ro
# google/cadvisor -docker_only
- job_name: 'dockerswarm'
- job_name: "dockerswarm"
dockerswarm_sd_configs:
- host: unix:///var/run/docker.sock # You can also use http/https to connect to the Docker daemon.
role: tasks
@ -51,4 +50,3 @@ scrape_configs:
- regex: __meta_dockerswarm_service_label_prometheus_(.+)
action: labelmap
replacement: $1


@ -2,14 +2,13 @@
# Hetzner.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Node Exporter instances to scrape.
- job_name: 'node'
- job_name: "node"
hetzner_sd_configs:
- authorization:
@ -19,10 +18,10 @@ scrape_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_public_ipv4]
target_label: __address__
replacement: '$1:9100'
replacement: "$1:9100"
# Discover Node Exporter instances to scrape using a Hetzner Cloud Network called mynet.
- job_name: 'node_private'
- job_name: "node_private"
hetzner_sd_configs:
- authorization:
@ -32,10 +31,10 @@ scrape_configs:
# Use the private IPv4 within the Hetzner Cloud Network and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_hcloud_private_ipv4_mynet]
target_label: __address__
replacement: '$1:9100'
replacement: "$1:9100"
# Discover Node Exporter instances to scrape.
- job_name: 'node_robot'
- job_name: "node_robot"
hetzner_sd_configs:
- basic_auth:
@ -46,4 +45,4 @@ scrape_configs:
# Use the public IPv4 and port 9100 to scrape the target.
- source_labels: [__meta_hetzner_public_ipv4]
target_label: __address__
replacement: '$1:9100'
replacement: "$1:9100"


@ -16,7 +16,7 @@
# default named port `https`. This works for single API server deployments as
# well as HA API server deployments.
scrape_configs:
- job_name: 'kubernetes-apiservers'
- job_name: "kubernetes-apiservers"
kubernetes_sd_configs:
- role: endpoints
@ -47,17 +47,22 @@ scrape_configs:
# will add targets for each API server which Kubernetes adds an endpoint to
# the default/kubernetes service.
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
- source_labels:
[
__meta_kubernetes_namespace,
__meta_kubernetes_service_name,
__meta_kubernetes_endpoint_port_name,
]
action: keep
regex: default;kubernetes;https
# Scrape config for nodes (kubelet).
#
# Rather than connecting directly to the node, the scrape is proxied though the
# Kubernetes apiserver. This means it will work if Prometheus is running out of
# cluster, or can't connect to nodes for some other reason (e.g. because of
# firewalling).
- job_name: 'kubernetes-nodes'
# Scrape config for nodes (kubelet).
#
# Rather than connecting directly to the node, the scrape is proxied though the
# Kubernetes apiserver. This means it will work if Prometheus is running out of
# cluster, or can't connect to nodes for some other reason (e.g. because of
# firewalling).
- job_name: "kubernetes-nodes"
# Default to scraping over https. If required, just disable this or change to
# `http`.
@ -88,21 +93,21 @@ scrape_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
# Scrape config for Kubelet cAdvisor.
#
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
# (those whose names begin with 'container_') have been removed from the
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
# retrieve those metrics.
#
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; use the "/metrics" endpoint on the 4194 port of nodes. In
# that case (and ensure cAdvisor's HTTP server hasn't been disabled with the
# --cadvisor-port=0 Kubelet flag).
#
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
- job_name: 'kubernetes-cadvisor'
# Scrape config for Kubelet cAdvisor.
#
# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
# (those whose names begin with 'container_') have been removed from the
# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
# retrieve those metrics.
#
# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
# HTTP endpoint; use the "/metrics" endpoint on the 4194 port of nodes. In
# that case (and ensure cAdvisor's HTTP server hasn't been disabled with the
# --cadvisor-port=0 Kubelet flag).
#
# This job is not necessary and should be removed in Kubernetes 1.6 and
# earlier versions, or it will cause the metrics to be scraped twice.
- job_name: "kubernetes-cadvisor"
# Default to scraping over https. If required, just disable this or change to
# `http`.
@ -139,11 +144,11 @@ scrape_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
# Example scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some endpoints.
- job_name: 'kubernetes-service-endpoints'
# Example scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some endpoints.
- job_name: "kubernetes-service-endpoints"
kubernetes_sd_configs:
- role: endpoints
@ -185,11 +190,11 @@ scrape_configs:
action: replace
target_label: kubernetes_name
# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some services.
- job_name: 'kubernetes-services'
# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# for all or only some services.
- job_name: "kubernetes-services"
metrics_path: /probe
params:
@ -216,11 +221,11 @@ scrape_configs:
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# for all or only some services.
- job_name: 'kubernetes-ingresses'
# Example scrape config for probing ingresses via the Blackbox Exporter.
#
# The relabeling allows the actual ingress scrape endpoint to be configured
# for all or only some services.
- job_name: "kubernetes-ingresses"
metrics_path: /probe
params:
@ -234,7 +239,12 @@ scrape_configs:
# - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
# action: keep
# regex: true
- source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
- source_labels:
[
__meta_kubernetes_ingress_scheme,
__address__,
__meta_kubernetes_ingress_path,
]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
@ -249,12 +259,12 @@ scrape_configs:
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
# Example scrape config for pods
#
# The relabeling allows the actual pod scrape to be configured
# for all the declared ports (or port-free target if none is declared)
# or only some ports.
- job_name: 'kubernetes-pods'
# Example scrape config for pods
#
# The relabeling allows the actual pod scrape to be configured
# for all the declared ports (or port-free target if none is declared)
# or only some ports.
- job_name: "kubernetes-pods"
kubernetes_sd_configs:
- role: pod


@ -3,22 +3,22 @@
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Node Exporter instances to scrape.
- job_name: 'node'
- job_name: "node"
linode_sd_configs:
- authorization:
credentials: "<replace with a Personal Access Token with linodes:read_only + ips:read_only access>"
relabel_configs:
# Only scrape targets that have a tag 'monitoring'.
- source_labels: [__meta_linode_tags]
regex: '.*,monitoring,.*'
regex: ".*,monitoring,.*"
action: keep
# Use the public IPv6 address and port 9100 to scrape the target.
- source_labels: [__meta_linode_public_ipv6]
target_label: __address__
replacement: '[$1]:9100'
replacement: "[$1]:9100"


@ -2,14 +2,13 @@
# (or DC/OS) cluster.
scrape_configs:
# Make Prometheus scrape itself for metrics.
- job_name: 'prometheus'
- job_name: "prometheus"
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]
# Discover Marathon services to scrape.
- job_name: 'marathon'
- job_name: "marathon"
# Scrape Marathon itself to discover new services every minute.
marathon_sd_configs:
@ -18,7 +17,6 @@ scrape_configs:
refresh_interval: 60s
relabel_configs:
# Only scrape targets that have a port label called 'metrics' specified on a port
# in their app definitions. Example using a port mapping (container or bridge networking):
#
@ -45,7 +43,11 @@ scrape_configs:
# ]
# Match a slash-prefixed string either in a portMapping or a portDefinition label.
- source_labels: [__meta_marathon_port_mapping_label_metrics,__meta_marathon_port_definition_label_metrics]
- source_labels:
[
__meta_marathon_port_mapping_label_metrics,
__meta_marathon_port_definition_label_metrics,
]
regex: (\/.+;|;\/.+)
action: keep


@ -20,10 +20,10 @@ rule_files:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'prometheus'
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9090']
- targets: ["localhost:9090"]


@ -10,7 +10,7 @@ kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
@ -18,13 +18,13 @@ rules:
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups:
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
- nonResourceURLs: ["/metrics", "/metrics/cadvisor"]
verbs: ["get"]
---
apiVersion: v1
@ -42,6 +42,6 @@ roleRef:
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
- kind: ServiceAccount
name: prometheus
namespace: default


@ -1,5 +1,5 @@
groups:
- name: yolo
- name: yolo
rules:
- record: yolo
expr: rate(hi)


@ -7,4 +7,3 @@ groups:
instance: localhost
annotation:
summary: annonations is written without s above


@ -1,3 +1,3 @@
groups:
- name: yolo
- name: yolo
- name: yolo
- name: yolo


@ -1,5 +1,5 @@
groups:
- name: yolo
- name: yolo
rules:
- record: hola
expr: 1


@ -1,5 +1,5 @@
groups:
- name: yolo
- name: yolo
rules:
- record: Hi
alert: Hello


@ -1,5 +1,5 @@
groups:
- name: my-group-name
- name: my-group-name
interval: 30s # defaults to global interval
rules:
- alert: HighErrors
@ -34,7 +34,7 @@ groups:
annotations:
description: "stuff's happening with {{ $.labels.service }}"
- name: my-another-name
- name: my-another-name
interval: 30s # defaults to global interval
rules:
- alert: HighErrors