Merge pull request #14970 from prometheus/beorn7/doc

docs: Improve, clarify, and fix documentation on scrape limits
Björn Rabenstein 2 months ago committed by GitHub
commit 67caa03dc1

@@ -71,8 +71,11 @@ global:
   # How frequently to evaluate rules.
   [ evaluation_interval: <duration> | default = 1m ]
-  # Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
-  # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping.
+  # Offset the rule evaluation timestamp of this particular group by the
+  # specified duration into the past to ensure the underlying metrics have
+  # been received. Metric availability delays are more likely to occur when
+  # Prometheus is running as a remote write target, but can also occur when
+  # there's anomalies with scraping.
   [ rule_query_offset: <duration> | default = 0s ]

   # The labels to add to any time series or alerts when communicating with
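For readers skimming the diff: a minimal prometheus.yml sketch of how the global rule_query_offset documented in the hunk above might be used. The 30s value and the remote-write scenario are illustrative assumptions, not part of this change.

global:
  evaluation_interval: 1m
  # Evaluate each rule against data as it looked 30s before the evaluation
  # timestamp, so samples that arrive late (e.g. via remote write) are
  # already present when the rule's query runs.
  rule_query_offset: 30s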
@@ -94,27 +97,29 @@ global:
   # change or be removed in the future.
   [ body_size_limit: <size> | default = 0 ]

-  # Per-scrape limit on number of scraped samples that will be accepted.
+  # Per-scrape limit on the number of scraped samples that will be accepted.
   # If more than this number of samples are present after metric relabeling
   # the entire scrape will be treated as failed. 0 means no limit.
   [ sample_limit: <int> | default = 0 ]

-  # Per-scrape limit on number of labels that will be accepted for a sample. If
-  # more than this number of labels are present post metric-relabeling, the
-  # entire scrape will be treated as failed. 0 means no limit.
+  # Limit on the number of labels that will be accepted per sample. If more
+  # than this number of labels are present on any sample post metric-relabeling,
+  # the entire scrape will be treated as failed. 0 means no limit.
   [ label_limit: <int> | default = 0 ]

-  # Per-scrape limit on length of labels name that will be accepted for a sample.
-  # If a label name is longer than this number post metric-relabeling, the entire
-  # scrape will be treated as failed. 0 means no limit.
+  # Limit on the length (in bytes) of each individual label name. If any label
+  # name in a scrape is longer than this number post metric-relabeling, the
+  # entire scrape will be treated as failed. Note that label names are UTF-8
+  # encoded, and characters can take up to 4 bytes. 0 means no limit.
   [ label_name_length_limit: <int> | default = 0 ]

-  # Per-scrape limit on length of labels value that will be accepted for a sample.
-  # If a label value is longer than this number post metric-relabeling, the
-  # entire scrape will be treated as failed. 0 means no limit.
+  # Limit on the length (in bytes) of each individual label value. If any label
+  # value in a scrape is longer than this number post metric-relabeling, the
+  # entire scrape will be treated as failed. Note that label values are UTF-8
+  # encoded, and characters can take up to 4 bytes. 0 means no limit.
   [ label_value_length_limit: <int> | default = 0 ]

-  # Per-scrape config limit on number of unique targets that will be
+  # Limit per scrape config on number of unique targets that will be
   # accepted. If more than this number of targets are present after target
   # relabeling, Prometheus will mark the targets as failed without scraping them.
   # 0 means no limit. This is an experimental feature, this behaviour could
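As a sketch only (the numeric values are illustrative assumptions, not recommendations made by this change), the global defaults documented in the hunk above could look like this in prometheus.yml:

global:
  scrape_interval: 15s
  # Fail any scrape that still has more than 10000 samples after metric relabeling.
  sample_limit: 10000
  # Fail any scrape in which a sample carries more than 50 labels.
  label_limit: 50
  # Fail any scrape containing a label name longer than 128 bytes or a
  # label value longer than 2048 bytes (lengths are measured in UTF-8 bytes).
  label_name_length_limit: 128
  label_value_length_limit: 2048

Setting these in the global section only provides defaults; each scrape config can override them, as the hunk below documents.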
@@ -456,34 +461,36 @@ metric_relabel_configs:
 # change or be removed in the future.
 [ body_size_limit: <size> | default = 0 ]

-# Per-scrape limit on number of scraped samples that will be accepted.
+# Per-scrape limit on the number of scraped samples that will be accepted.
 # If more than this number of samples are present after metric relabeling
 # the entire scrape will be treated as failed. 0 means no limit.
 [ sample_limit: <int> | default = 0 ]

-# Per-scrape limit on number of labels that will be accepted for a sample. If
-# more than this number of labels are present post metric-relabeling, the
-# entire scrape will be treated as failed. 0 means no limit.
+# Limit on the number of labels that will be accepted per sample. If more
+# than this number of labels are present on any sample post metric-relabeling,
+# the entire scrape will be treated as failed. 0 means no limit.
 [ label_limit: <int> | default = 0 ]

-# Per-scrape limit on length of labels name that will be accepted for a sample.
-# If a label name is longer than this number post metric-relabeling, the entire
-# scrape will be treated as failed. 0 means no limit.
+# Limit on the length (in bytes) of each individual label name. If any label
+# name in a scrape is longer than this number post metric-relabeling, the
+# entire scrape will be treated as failed. Note that label names are UTF-8
+# encoded, and characters can take up to 4 bytes. 0 means no limit.
 [ label_name_length_limit: <int> | default = 0 ]

-# Per-scrape limit on length of labels value that will be accepted for a sample.
-# If a label value is longer than this number post metric-relabeling, the
-# entire scrape will be treated as failed. 0 means no limit.
+# Limit on the length (in bytes) of each individual label value. If any label
+# value in a scrape is longer than this number post metric-relabeling, the
+# entire scrape will be treated as failed. Note that label values are UTF-8
+# encoded, and characters can take up to 4 bytes. 0 means no limit.
 [ label_value_length_limit: <int> | default = 0 ]

-# Per-scrape config limit on number of unique targets that will be
+# Limit per scrape config on number of unique targets that will be
 # accepted. If more than this number of targets are present after target
 # relabeling, Prometheus will mark the targets as failed without scraping them.
 # 0 means no limit. This is an experimental feature, this behaviour could
 # change in the future.
 [ target_limit: <int> | default = 0 ]

-# Per-job limit on the number of targets dropped by relabeling
+# Limit per scrape config on the number of targets dropped by relabeling
 # that will be kept in memory. 0 means no limit.
 [ keep_dropped_targets: <int> | default = 0 ]
