From a124d56ac2a297d95116d611078c18853714c030 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?=
Date: Wed, 7 Apr 2021 17:04:37 +0200
Subject: [PATCH 01/34] Revert "Changelog: Add hyperlinks to PRs"

Signed-off-by: beorn7
---
 CHANGELOG.md | 1074 +++++++++++++++++++++++++-------------------------
 1 file changed, 537 insertions(+), 537 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 26175187f..7b0e2462e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,38 +1,38 @@
## 2.26.0 / 2021-03-31

-Prometheus is now built and supporting Go 1.16 ([#8544](https://github.com/prometheus/prometheus/pull/8544)). This reverts the memory release pattern added in Go 1.12. This makes common RSS usage metrics showing more accurate number for actual memory used by Prometheus. You can read more details [here](https://www.bwplotka.dev/2019/golang-memory-monitoring/).
+Prometheus is now built with, and supports, Go 1.16 (#8544). This reverts the memory release pattern added in Go 1.12, so common RSS usage metrics now show a more accurate number for the memory actually used by Prometheus. You can read more details [here](https://www.bwplotka.dev/2019/golang-memory-monitoring/).

Note that from this release Prometheus is using Alertmanager v2 by default.

-* [CHANGE] Alerting: Using Alertmanager v2 API by default. [#8626](https://github.com/prometheus/prometheus/pull/8626)
-* [CHANGE] Prometheus/Promtool: As agreed on dev summit, binaries are now printing help and usage to stdout instead of stderr. [#8542](https://github.com/prometheus/prometheus/pull/8542)
-* [FEATURE] Remote: Add support for AWS SigV4 auth method for remote_write. [#8509](https://github.com/prometheus/prometheus/pull/8509)
-* [FEATURE] Scaleway Discovery: Add Scaleway Service Discovery. [#8555](https://github.com/prometheus/prometheus/pull/8555)
-* [FEATURE] PromQL: Allow negative offsets. Behind `--enable-feature=promql-negative-offset` flag. [#8487](https://github.com/prometheus/prometheus/pull/8487)
-* [FEATURE] **experimental** Exemplars: Add in-memory storage for exemplars. Behind `--enable-feature=exemplar-storage` flag. [#6635](https://github.com/prometheus/prometheus/pull/6635)
-* [FEATURE] UI: Add advanced auto-completion, syntax highlighting and linting to graph page query input. [#8634](https://github.com/prometheus/prometheus/pull/8634)
-* [ENHANCEMENT] Digital Ocean Discovery: Add `__meta_digitalocean_image` label. [#8497](https://github.com/prometheus/prometheus/pull/8497)
-* [ENHANCEMENT] PromQL: Add `last_over_time`, `sgn`, `clamp` functions. [#8457](https://github.com/prometheus/prometheus/pull/8457)
-* [ENHANCEMENT] Scrape: Add support for specifying type of Authorization header credentials with Bearer by default. [#8512](https://github.com/prometheus/prometheus/pull/8512)
-* [ENHANCEMENT] Scrape: Add `follow_redirects` option to scrape configuration. [#8546](https://github.com/prometheus/prometheus/pull/8546)
-* [ENHANCEMENT] Remote: Allow retries on HTTP 429 response code for remote_write. Disabled by default. See [configuration docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/remote_write) for details. [#8237](https://github.com/prometheus/prometheus/pull/#8237) [#8477](https://github.com/prometheus/prometheus/pull/8477)
-* [ENHANCEMENT] Remote: Allow configuring custom headers for remote_read. See [configuration docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/remote_read) for details. [#8516](https://github.com/prometheus/prometheus/pull/#8516)
-* [ENHANCEMENT] UI: Hitting Enter now triggers new query. [#8581](https://github.com/prometheus/prometheus/pull/8581)
-* [ENHANCEMENT] UI: Better handling of long rule and names on the `/rules` and `/targets` pages. [#8608](https://github.com/prometheus/prometheus/pull/8608) [#8609](https://github.com/prometheus/prometheus/pull/8609)
-* [ENHANCEMENT] UI: Add collapse/expand all button on the `/targets` page. [#8486](https://github.com/prometheus/prometheus/pull/8486)
-* [BUGFIX] TSDB: Eager deletion of removable blocks on every compaction, saving disk peak space usage. [#8007](https://github.com/prometheus/prometheus/pull/8007)
-* [BUGFIX] PromQL: Fix parser support for special characters like`炬`. [#8517](https://github.com/prometheus/prometheus/pull/8517)
-* [BUGFIX] Rules: Update rule health for append/commit fails. [#8619](https://github.com/prometheus/prometheus/pull/8619)
+* [CHANGE] Alerting: Using Alertmanager v2 API by default. #8626
+* [CHANGE] Prometheus/Promtool: As agreed on dev summit, binaries now print help and usage to stdout instead of stderr. #8542
+* [FEATURE] Remote: Add support for AWS SigV4 auth method for remote_write. #8509
+* [FEATURE] Scaleway Discovery: Add Scaleway Service Discovery. #8555
+* [FEATURE] PromQL: Allow negative offsets. Behind `--enable-feature=promql-negative-offset` flag. #8487
+* [FEATURE] **experimental** Exemplars: Add in-memory storage for exemplars. Behind `--enable-feature=exemplar-storage` flag. #6635
+* [FEATURE] UI: Add advanced auto-completion, syntax highlighting and linting to graph page query input. #8634
+* [ENHANCEMENT] Digital Ocean Discovery: Add `__meta_digitalocean_image` label. #8497
+* [ENHANCEMENT] PromQL: Add `last_over_time`, `sgn`, `clamp` functions. #8457
+* [ENHANCEMENT] Scrape: Add support for specifying the type of Authorization header credentials, with Bearer as the default (see the example below this list). #8512
+* [ENHANCEMENT] Scrape: Add `follow_redirects` option to scrape configuration (see the example below this list). #8546
+* [ENHANCEMENT] Remote: Allow retries on HTTP 429 response code for remote_write. Disabled by default. See [configuration docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) for details. #8237 #8477
+* [ENHANCEMENT] Remote: Allow configuring custom headers for remote_read. See [configuration docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read) for details. #8516
+* [ENHANCEMENT] UI: Hitting Enter now triggers a new query. #8581
+* [ENHANCEMENT] UI: Better handling of long rule and target names on the `/rules` and `/targets` pages. #8608 #8609
+* [ENHANCEMENT] UI: Add collapse/expand all button on the `/targets` page. #8486
+* [BUGFIX] TSDB: Eager deletion of removable blocks on every compaction, reducing peak disk space usage. #8007
+* [BUGFIX] PromQL: Fix parser support for special characters like `炬`. #8517
+* [BUGFIX] Rules: Update rule health for append/commit fails. #8619
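
For illustration, a minimal `scrape_config` combining the new `authorization` and `follow_redirects` settings could look like the sketch below (the job name, token path, and target are placeholders, not part of this release):

```yaml
scrape_configs:
  - job_name: "example"
    # 2.26.0 makes the credential type configurable; it defaults to Bearer (#8512).
    authorization:
      type: Bearer
      credentials_file: /etc/prometheus/token
    # 2.26.0 allows disabling HTTP redirect following per scrape config (#8546).
    follow_redirects: false
    static_configs:
      - targets: ["localhost:8080"]
```
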
## 2.25.2 / 2021-03-16

-* [BUGFIX] Fix the ingestion of scrapes when the wall clock changes, e.g. on suspend. [#8601](https://github.com/prometheus/prometheus/pull/8601)
+* [BUGFIX] Fix the ingestion of scrapes when the wall clock changes, e.g. on suspend. #8601

## 2.25.1 / 2021-03-14

-* [BUGFIX] Fix a crash in `promtool` when a subquery with default resolution is used. [#8569](https://github.com/prometheus/prometheus/pull/8569)
-* [BUGFIX] Fix a bug that could return duplicate datapoints in queries. [#8591](https://github.com/prometheus/prometheus/pull/8591)
-* [BUGFIX] Fix crashes with arm64 when compiled with go1.16. [#8593](https://github.com/prometheus/prometheus/pull/8593)
+* [BUGFIX] Fix a crash in `promtool` when a subquery with default resolution is used. #8569
+* [BUGFIX] Fix a bug that could return duplicate datapoints in queries. #8591
+* [BUGFIX] Fix crashes with arm64 when compiled with go1.16. #8593
## 2.25.0 / 2021-02-17

@@ -44,26 +44,26 @@

It will be done by defaulting `alertmanager_config.api_version` to `v2`.

Alertmanager API v2 was released in Alertmanager v0.16.0 (released in January 2019).

-* [FEATURE] **experimental** API: Accept remote_write requests. Behind the --enable-feature=remote-write-receiver flag. [#8424](https://github.com/prometheus/prometheus/pull/8424)
-* [FEATURE] **experimental** PromQL: Add '@ <timestamp>' modifier. Behind the --enable-feature=promql-at-modifier flag. [#8121](https://github.com/prometheus/prometheus/pull/8121) [#8436](https://github.com/prometheus/prometheus/pull/8436) [#8425](https://github.com/prometheus/prometheus/pull/8425)
-* [ENHANCEMENT] Add optional name property to testgroup for better test failure output. [#8440](https://github.com/prometheus/prometheus/pull/8440)
-* [ENHANCEMENT] Add warnings into React Panel on the Graph page. [#8427](https://github.com/prometheus/prometheus/pull/8427)
-* [ENHANCEMENT] TSDB: Increase the number of buckets for the compaction duration metric. [#8342](https://github.com/prometheus/prometheus/pull/8342)
-* [ENHANCEMENT] Remote: Allow passing along custom remote_write HTTP headers. [#8416](https://github.com/prometheus/prometheus/pull/8416)
-* [ENHANCEMENT] Mixins: Scope grafana configuration. [#8332](https://github.com/prometheus/prometheus/pull/8332)
-* [ENHANCEMENT] Kubernetes SD: Add endpoint labels metadata. [#8273](https://github.com/prometheus/prometheus/pull/8273)
-* [ENHANCEMENT] UI: Expose total number of label pairs in head in TSDB stats page. [#8343](https://github.com/prometheus/prometheus/pull/8343)
-* [ENHANCEMENT] TSDB: Reload blocks every minute, to detect new blocks and enforce retention more often. [#8343](https://github.com/prometheus/prometheus/pull/8343)
-* [BUGFIX] API: Fix global URL when external address has no port. [#8359](https://github.com/prometheus/prometheus/pull/8359)
-* [BUGFIX] Backfill: Fix error message handling. [#8432](https://github.com/prometheus/prometheus/pull/8432)
-* [BUGFIX] Backfill: Fix "add sample: out of bounds" error when series span an entire block. [#8476](https://github.com/prometheus/prometheus/pull/8476)
-* [BUGFIX] Deprecate unused flag --alertmanager.timeout. [#8407](https://github.com/prometheus/prometheus/pull/8407)
-* [BUGFIX] Mixins: Support remote-write metrics renamed in v2.23 in alerts. [#8423](https://github.com/prometheus/prometheus/pull/8423)
-* [BUGFIX] Remote: Fix garbage collection of dropped series in remote write. [#8387](https://github.com/prometheus/prometheus/pull/8387)
-* [BUGFIX] Remote: Log recoverable remote write errors as warnings. [#8412](https://github.com/prometheus/prometheus/pull/8412)
-* [BUGFIX] TSDB: Remove pre-2.21 temporary blocks on start. [#8353](https://github.com/prometheus/prometheus/pull/8353).
-* [BUGFIX] UI: Fix duplicated keys on /targets page. [#8456](https://github.com/prometheus/prometheus/pull/8456)
-* [BUGFIX] UI: Fix label name leak into class name. [#8459](https://github.com/prometheus/prometheus/pull/8459)
+* [FEATURE] **experimental** API: Accept remote_write requests. Behind the --enable-feature=remote-write-receiver flag (see the example below this list). #8424
+* [FEATURE] **experimental** PromQL: Add '@ <timestamp>' modifier. Behind the --enable-feature=promql-at-modifier flag. #8121 #8436 #8425
+* [ENHANCEMENT] Add optional name property to testgroup for better test failure output. #8440
+* [ENHANCEMENT] Add warnings into React Panel on the Graph page. #8427
+* [ENHANCEMENT] TSDB: Increase the number of buckets for the compaction duration metric. #8342
+* [ENHANCEMENT] Remote: Allow passing along custom remote_write HTTP headers. #8416
+* [ENHANCEMENT] Mixins: Scope grafana configuration. #8332
+* [ENHANCEMENT] Kubernetes SD: Add endpoint labels metadata. #8273
+* [ENHANCEMENT] UI: Expose total number of label pairs in head in TSDB stats page. #8343
+* [ENHANCEMENT] TSDB: Reload blocks every minute, to detect new blocks and enforce retention more often. #8343
+* [BUGFIX] API: Fix global URL when external address has no port. #8359
+* [BUGFIX] Backfill: Fix error message handling. #8432
+* [BUGFIX] Backfill: Fix "add sample: out of bounds" error when series span an entire block. #8476
+* [BUGFIX] Deprecate unused flag --alertmanager.timeout. #8407
+* [BUGFIX] Mixins: Support remote-write metrics renamed in v2.23 in alerts. #8423
+* [BUGFIX] Remote: Fix garbage collection of dropped series in remote write. #8387
+* [BUGFIX] Remote: Log recoverable remote write errors as warnings. #8412
+* [BUGFIX] TSDB: Remove pre-2.21 temporary blocks on start. #8353.
+* [BUGFIX] UI: Fix duplicated keys on /targets page. #8456
+* [BUGFIX] UI: Fix label name leak into class name. #8459
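
As a sketch of how the experimental receiver fits together: start the receiving Prometheus with `--enable-feature=remote-write-receiver`, then point the sending instance's `remote_write` at it. The URL below assumes the receiver listens on the default local address and exposes the standard `/api/v1/write` endpoint:

```yaml
remote_write:
  # Target is a Prometheus started with --enable-feature=remote-write-receiver.
  - url: http://localhost:9090/api/v1/write
```
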
## 2.24.1 / 2021-01-20

@@ -72,76 +72,76 @@

## 2.24.0 / 2021-01-06

-* [FEATURE] Add TLS and basic authentication to HTTP endpoints. [#8316](https://github.com/prometheus/prometheus/pull/8316)
-* [FEATURE] promtool: Add `check web-config` subcommand to check web config files. [#8319](https://github.com/prometheus/prometheus/pull/8319)
-* [FEATURE] promtool: Add `tsdb create-blocks-from openmetrics` subcommand to backfill metrics data from an OpenMetrics file. [#8084](https://github.com/prometheus/prometheus/pull/8084)
-* [ENHANCEMENT] HTTP API: Fast-fail queries with only empty matchers. [#8288](https://github.com/prometheus/prometheus/pull/8288)
-* [ENHANCEMENT] HTTP API: Support matchers for labels API. [#8301](https://github.com/prometheus/prometheus/pull/8301)
-* [ENHANCEMENT] promtool: Improve checking of URLs passed on the command line. [#7956](https://github.com/prometheus/prometheus/pull/7956)
-* [ENHANCEMENT] SD: Expose IPv6 as a label in EC2 SD. [#7086](https://github.com/prometheus/prometheus/pull/7086)
-* [ENHANCEMENT] SD: Reuse EC2 client, reducing frequency of requesting credentials. [#8311](https://github.com/prometheus/prometheus/pull/8311)
-* [ENHANCEMENT] TSDB: Add logging when compaction takes more than the block time range. [#8151](https://github.com/prometheus/prometheus/pull/8151)
-* [ENHANCEMENT] TSDB: Avoid unnecessary GC runs after compaction. [#8276](https://github.com/prometheus/prometheus/pull/8276)
-* [BUGFIX] HTTP API: Avoid double-closing of channel when quitting multiple times via HTTP. [#8242](https://github.com/prometheus/prometheus/pull/8242)
-* [BUGFIX] SD: Ignore CNAME records in DNS SD to avoid spurious `Invalid SRV record` warnings. [#8216](https://github.com/prometheus/prometheus/pull/8216)
-* [BUGFIX] SD: Avoid config error triggered by valid label selectors in Kubernetes SD.
[#8285](https://github.com/prometheus/prometheus/pull/8285) +* [FEATURE] Add TLS and basic authentication to HTTP endpoints. #8316 +* [FEATURE] promtool: Add `check web-config` subcommand to check web config files. #8319 +* [FEATURE] promtool: Add `tsdb create-blocks-from openmetrics` subcommand to backfill metrics data from an OpenMetrics file. #8084 +* [ENHANCEMENT] HTTP API: Fast-fail queries with only empty matchers. #8288 +* [ENHANCEMENT] HTTP API: Support matchers for labels API. #8301 +* [ENHANCEMENT] promtool: Improve checking of URLs passed on the command line. #7956 +* [ENHANCEMENT] SD: Expose IPv6 as a label in EC2 SD. #7086 +* [ENHANCEMENT] SD: Reuse EC2 client, reducing frequency of requesting credentials. #8311 +* [ENHANCEMENT] TSDB: Add logging when compaction takes more than the block time range. #8151 +* [ENHANCEMENT] TSDB: Avoid unnecessary GC runs after compaction. #8276 +* [BUGFIX] HTTP API: Avoid double-closing of channel when quitting multiple times via HTTP. #8242 +* [BUGFIX] SD: Ignore CNAME records in DNS SD to avoid spurious `Invalid SRV record` warnings. #8216 +* [BUGFIX] SD: Avoid config error triggered by valid label selectors in Kubernetes SD. #8285 ## 2.23.0 / 2020-11-26 -* [CHANGE] UI: Make the React UI default. [#8142](https://github.com/prometheus/prometheus/pull/8142) -* [CHANGE] Remote write: The following metrics were removed/renamed in remote write. [#6815](https://github.com/prometheus/prometheus/pull/6815) +* [CHANGE] UI: Make the React UI default. #8142 +* [CHANGE] Remote write: The following metrics were removed/renamed in remote write. #6815 - `prometheus_remote_storage_succeeded_samples_total` was removed and `prometheus_remote_storage_samples_total` was introduced for all the samples attempted to send. - `prometheus_remote_storage_sent_bytes_total` was removed and replaced with `prometheus_remote_storage_samples_bytes_total` and `prometheus_remote_storage_metadata_bytes_total`. - `prometheus_remote_storage_failed_samples_total` -> `prometheus_remote_storage_samples_failed_total` . - `prometheus_remote_storage_retried_samples_total` -> `prometheus_remote_storage_samples_retried_total`. - `prometheus_remote_storage_dropped_samples_total` -> `prometheus_remote_storage_samples_dropped_total`. - `prometheus_remote_storage_pending_samples` -> `prometheus_remote_storage_samples_pending`. -* [CHANGE] Remote: Do not collect non-initialized timestamp metrics. [#8060](https://github.com/prometheus/prometheus/pull/8060) -* [FEATURE] [EXPERIMENTAL] Remote write: Allow metric metadata to be propagated via remote write. The following new metrics were introduced: `prometheus_remote_storage_metadata_total`, `prometheus_remote_storage_metadata_failed_total`, `prometheus_remote_storage_metadata_retried_total`, `prometheus_remote_storage_metadata_bytes_total`. [#6815](https://github.com/prometheus/prometheus/pull/6815) -* [ENHANCEMENT] Remote write: Added a metric `prometheus_remote_storage_max_samples_per_send` for remote write. [#8102](https://github.com/prometheus/prometheus/pull/8102) -* [ENHANCEMENT] TSDB: Make the snapshot directory name always the same length. [#8138](https://github.com/prometheus/prometheus/pull/8138) -* [ENHANCEMENT] TSDB: Create a checkpoint only once at the end of all head compactions. [#8067](https://github.com/prometheus/prometheus/pull/8067) -* [ENHANCEMENT] TSDB: Avoid Series API from hitting the chunks. 
[#8050](https://github.com/prometheus/prometheus/pull/8050)
-* [ENHANCEMENT] TSDB: Cache label name and last value when adding series during compactions making compactions faster. [#8192](https://github.com/prometheus/prometheus/pull/8192)
-* [ENHANCEMENT] PromQL: Improved performance of Hash method making queries a bit faster. [#8025](https://github.com/prometheus/prometheus/pull/8025)
-* [ENHANCEMENT] promtool: `tsdb list` now prints block sizes. [#7993](https://github.com/prometheus/prometheus/pull/7993)
-* [ENHANCEMENT] promtool: Calculate mint and maxt per test avoiding unnecessary calculations. [#8096](https://github.com/prometheus/prometheus/pull/8096)
-* [ENHANCEMENT] SD: Add filtering of services to Docker Swarm SD. [#8074](https://github.com/prometheus/prometheus/pull/8074)
-* [BUGFIX] React UI: Fix button display when there are no panels. [#8155](https://github.com/prometheus/prometheus/pull/8155)
-* [BUGFIX] PromQL: Fix timestamp() method for vector selector inside parenthesis. [#8164](https://github.com/prometheus/prometheus/pull/8164)
-* [BUGFIX] PromQL: Don't include rendered expression on PromQL parse errors. [#8177](https://github.com/prometheus/prometheus/pull/8177)
-* [BUGFIX] web: Fix panic with double close() of channel on calling `/-/quit/`. [#8166](https://github.com/prometheus/prometheus/pull/8166)
-* [BUGFIX] TSDB: Fixed WAL corruption on partial writes within a page causing `invalid checksum` error on WAL replay. [#8125](https://github.com/prometheus/prometheus/pull/8125)
+* [CHANGE] Remote: Do not collect non-initialized timestamp metrics. #8060
+* [FEATURE] [EXPERIMENTAL] Remote write: Allow metric metadata to be propagated via remote write. The following new metrics were introduced: `prometheus_remote_storage_metadata_total`, `prometheus_remote_storage_metadata_failed_total`, `prometheus_remote_storage_metadata_retried_total`, `prometheus_remote_storage_metadata_bytes_total`. #6815
+* [ENHANCEMENT] Remote write: Added a metric `prometheus_remote_storage_max_samples_per_send` for remote write. #8102
+* [ENHANCEMENT] TSDB: Make the snapshot directory name always the same length. #8138
+* [ENHANCEMENT] TSDB: Create a checkpoint only once at the end of all head compactions. #8067
+* [ENHANCEMENT] TSDB: Avoid the Series API hitting the chunks. #8050
+* [ENHANCEMENT] TSDB: Cache label name and last value when adding series during compactions, making compactions faster. #8192
+* [ENHANCEMENT] PromQL: Improved performance of the Hash method, making queries a bit faster. #8025
+* [ENHANCEMENT] promtool: `tsdb list` now prints block sizes. #7993
+* [ENHANCEMENT] promtool: Calculate mint and maxt per test, avoiding unnecessary calculations. #8096
+* [ENHANCEMENT] SD: Add filtering of services to Docker Swarm SD. #8074
+* [BUGFIX] React UI: Fix button display when there are no panels. #8155
+* [BUGFIX] PromQL: Fix timestamp() method for a vector selector inside parentheses. #8164
+* [BUGFIX] PromQL: Don't include rendered expression on PromQL parse errors. #8177
+* [BUGFIX] web: Fix panic with double close() of channel on calling `/-/quit/`. #8166
+* [BUGFIX] TSDB: Fixed WAL corruption on partial writes within a page causing `invalid checksum` error on WAL replay. #8125
* [BUGFIX] Update config metrics `prometheus_config_last_reload_successful` and `prometheus_config_last_reload_success_timestamp_seconds` right after initial validation before starting TSDB.
* [BUGFIX] promtool: Correctly detect duplicate label names in exposition.
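
Dashboards and alerts referring to the removed remote-write metrics have to be updated to the new names listed above. As an illustrative sketch (the alert name, threshold, and durations are placeholders), an alert on failed samples would now use `prometheus_remote_storage_samples_failed_total`:

```yaml
groups:
  - name: remote-write
    rules:
      - alert: RemoteWriteSamplesFailing
        # Formerly based on prometheus_remote_storage_failed_samples_total.
        expr: rate(prometheus_remote_storage_samples_failed_total[5m]) > 0
        for: 15m
```
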
## 2.22.2 / 2020-11-16 -* [BUGFIX] Fix race condition in syncing/stopping/reloading scrapers. [#8176](https://github.com/prometheus/prometheus/pull/8176) +* [BUGFIX] Fix race condition in syncing/stopping/reloading scrapers. #8176 ## 2.22.1 / 2020-11-03 -* [BUGFIX] Fix potential "mmap: invalid argument" errors in loading the head chunks, after an unclean shutdown, by performing read repairs. [#8061](https://github.com/prometheus/prometheus/pull/8061) -* [BUGFIX] Fix serving metrics and API when reloading scrape config. [#8104](https://github.com/prometheus/prometheus/pull/8104) -* [BUGFIX] Fix head chunk size calculation for size based retention. [#8139](https://github.com/prometheus/prometheus/pull/8139) +* [BUGFIX] Fix potential "mmap: invalid argument" errors in loading the head chunks, after an unclean shutdown, by performing read repairs. #8061 +* [BUGFIX] Fix serving metrics and API when reloading scrape config. #8104 +* [BUGFIX] Fix head chunk size calculation for size based retention. #8139 ## 2.22.0 / 2020-10-07 As announced in the 2.21.0 release notes, the experimental gRPC API v2 has been removed. -* [CHANGE] web: Remove APIv2. [#7935](https://github.com/prometheus/prometheus/pull/7935) -* [ENHANCEMENT] React UI: Implement missing TSDB head stats section. [#7876](https://github.com/prometheus/prometheus/pull/7876) -* [ENHANCEMENT] UI: Add Collapse all button to targets page. [#6957](https://github.com/prometheus/prometheus/pull/6957) -* [ENHANCEMENT] UI: Clarify alert state toggle via checkbox icon. [#7936](https://github.com/prometheus/prometheus/pull/7936) -* [ENHANCEMENT] Add `rule_group_last_evaluation_samples` and `prometheus_tsdb_data_replay_duration_seconds` metrics. [#7737](https://github.com/prometheus/prometheus/pull/7737) [#7977](https://github.com/prometheus/prometheus/pull/7977) -* [ENHANCEMENT] Gracefully handle unknown WAL record types. [#8004](https://github.com/prometheus/prometheus/pull/8004) -* [ENHANCEMENT] Issue a warning for 64 bit systems running 32 bit binaries. [#8012](https://github.com/prometheus/prometheus/pull/8012) -* [BUGFIX] Adjust scrape timestamps to align them to the intended schedule, effectively reducing block size. Workaround for a regression in go1.14+. [#7976](https://github.com/prometheus/prometheus/pull/7976) -* [BUGFIX] promtool: Ensure alert rules are marked as restored in unit tests. [#7661](https://github.com/prometheus/prometheus/pull/7661) -* [BUGFIX] Eureka: Fix service discovery when compiled in 32-bit. [#7964](https://github.com/prometheus/prometheus/pull/7964) -* [BUGFIX] Don't do literal regex matching optimisation when case insensitive. [#8013](https://github.com/prometheus/prometheus/pull/8013) -* [BUGFIX] Fix classic UI sometimes running queries for instant query when in range query mode. [#7984](https://github.com/prometheus/prometheus/pull/7984) +* [CHANGE] web: Remove APIv2. #7935 +* [ENHANCEMENT] React UI: Implement missing TSDB head stats section. #7876 +* [ENHANCEMENT] UI: Add Collapse all button to targets page. #6957 +* [ENHANCEMENT] UI: Clarify alert state toggle via checkbox icon. #7936 +* [ENHANCEMENT] Add `rule_group_last_evaluation_samples` and `prometheus_tsdb_data_replay_duration_seconds` metrics. #7737 #7977 +* [ENHANCEMENT] Gracefully handle unknown WAL record types. #8004 +* [ENHANCEMENT] Issue a warning for 64 bit systems running 32 bit binaries. #8012 +* [BUGFIX] Adjust scrape timestamps to align them to the intended schedule, effectively reducing block size. Workaround for a regression in go1.14+. 
#7976 +* [BUGFIX] promtool: Ensure alert rules are marked as restored in unit tests. #7661 +* [BUGFIX] Eureka: Fix service discovery when compiled in 32-bit. #7964 +* [BUGFIX] Don't do literal regex matching optimisation when case insensitive. #8013 +* [BUGFIX] Fix classic UI sometimes running queries for instant query when in range query mode. #7984 ## 2.21.0 / 2020-09-11 @@ -152,137 +152,137 @@ In the unlikely case that you use the gRPC API v2 (which is limited to TSDB admin commands), please note that we will remove this experimental API in the next minor release 2.22. -* [CHANGE] Disable HTTP/2 because of concerns with the Go HTTP/2 client. [#7588](https://github.com/prometheus/prometheus/pull/7588) [#7701](https://github.com/prometheus/prometheus/pull/7701) -* [CHANGE] PromQL: `query_log_file` path is now relative to the config file. [#7701](https://github.com/prometheus/prometheus/pull/7701) -* [CHANGE] Promtool: Replace the tsdb command line tool by a promtool tsdb subcommand. [#6088](https://github.com/prometheus/prometheus/pull/6088) -* [CHANGE] Rules: Label `rule_group_iterations` metric with group name. [#7823](https://github.com/prometheus/prometheus/pull/7823) -* [FEATURE] Eureka SD: New service discovery. [#3369](https://github.com/prometheus/prometheus/pull/3369) -* [FEATURE] Hetzner SD: New service discovery. [#7822](https://github.com/prometheus/prometheus/pull/7822) -* [FEATURE] Kubernetes SD: Support Kubernetes EndpointSlices. [#6838](https://github.com/prometheus/prometheus/pull/6838) -* [FEATURE] Scrape: Add per scrape-config targets limit. [#7554](https://github.com/prometheus/prometheus/pull/7554) -* [ENHANCEMENT] Support composite durations in PromQL, config and UI, e.g. 1h30m. [#7713](https://github.com/prometheus/prometheus/pull/7713) [#7833](https://github.com/prometheus/prometheus/pull/7833) -* [ENHANCEMENT] DNS SD: Add SRV record target and port meta labels. [#7678](https://github.com/prometheus/prometheus/pull/7678) -* [ENHANCEMENT] Docker Swarm SD: Support tasks and service without published ports. [#7686](https://github.com/prometheus/prometheus/pull/7686) -* [ENHANCEMENT] PromQL: Reduce the amount of data queried by remote read when a subquery has an offset. [#7667](https://github.com/prometheus/prometheus/pull/7667) -* [ENHANCEMENT] Promtool: Add `--time` option to query instant command. [#7829](https://github.com/prometheus/prometheus/pull/7829) -* [ENHANCEMENT] UI: Respect the `--web.page-title` parameter in the React UI. [#7607](https://github.com/prometheus/prometheus/pull/7607) -* [ENHANCEMENT] UI: Add duration, labels, annotations to alerts page in the React UI. [#7605](https://github.com/prometheus/prometheus/pull/7605) -* [ENHANCEMENT] UI: Add duration on the React UI rules page, hide annotation and labels if empty. [#7606](https://github.com/prometheus/prometheus/pull/7606) -* [BUGFIX] API: Deduplicate series in /api/v1/series. [#7862](https://github.com/prometheus/prometheus/pull/7862) -* [BUGFIX] PromQL: Drop metric name in bool comparison between two instant vectors. [#7819](https://github.com/prometheus/prometheus/pull/7819) -* [BUGFIX] PromQL: Exit with an error when time parameters can't be parsed. [#7505](https://github.com/prometheus/prometheus/pull/7505) -* [BUGFIX] Remote read: Re-add accidentally removed tracing for remote-read requests. [#7916](https://github.com/prometheus/prometheus/pull/7916) -* [BUGFIX] Rules: Detect extra fields in rule files. 
[#7767](https://github.com/prometheus/prometheus/pull/7767)
-* [BUGFIX] Rules: Disallow overwriting the metric name in the `labels` section of recording rules. [#7787](https://github.com/prometheus/prometheus/pull/7787)
-* [BUGFIX] Rules: Keep evaluation timestamp across reloads. [#7775](https://github.com/prometheus/prometheus/pull/7775)
-* [BUGFIX] Scrape: Do not stop scrapes in progress during reload. [#7752](https://github.com/prometheus/prometheus/pull/7752)
-* [BUGFIX] TSDB: Fix `chunks.HeadReadWriter: maxt of the files are not set` error. [#7856](https://github.com/prometheus/prometheus/pull/7856)
-* [BUGFIX] TSDB: Delete blocks atomically to prevent corruption when there is a panic/crash during deletion. [#7772](https://github.com/prometheus/prometheus/pull/7772)
-* [BUGFIX] Triton SD: Fix a panic when triton_sd_config is nil. [#7671](https://github.com/prometheus/prometheus/pull/7671)
-* [BUGFIX] UI: Fix react UI bug with series going on and off. [#7804](https://github.com/prometheus/prometheus/pull/7804)
-* [BUGFIX] UI: Fix styling bug for target labels with special names in React UI. [#7902](https://github.com/prometheus/prometheus/pull/7902)
-* [BUGFIX] Web: Stop CMUX and GRPC servers even with stale connections, preventing the server to stop on SIGTERM. [#7810](https://github.com/prometheus/prometheus/pull/7810)
+* [CHANGE] Disable HTTP/2 because of concerns with the Go HTTP/2 client. #7588 #7701
+* [CHANGE] PromQL: `query_log_file` path is now relative to the config file. #7701
+* [CHANGE] Promtool: Replace the tsdb command line tool by a promtool tsdb subcommand. #6088
+* [CHANGE] Rules: Label `rule_group_iterations` metric with group name. #7823
+* [FEATURE] Eureka SD: New service discovery. #3369
+* [FEATURE] Hetzner SD: New service discovery. #7822
+* [FEATURE] Kubernetes SD: Support Kubernetes EndpointSlices. #6838
+* [FEATURE] Scrape: Add per scrape-config targets limit. #7554
+* [ENHANCEMENT] Support composite durations in PromQL, config and UI, e.g. 1h30m (see the example below this list). #7713 #7833
+* [ENHANCEMENT] DNS SD: Add SRV record target and port meta labels. #7678
+* [ENHANCEMENT] Docker Swarm SD: Support tasks and services without published ports. #7686
+* [ENHANCEMENT] PromQL: Reduce the amount of data queried by remote read when a subquery has an offset. #7667
+* [ENHANCEMENT] Promtool: Add `--time` option to query instant command. #7829
+* [ENHANCEMENT] UI: Respect the `--web.page-title` parameter in the React UI. #7607
+* [ENHANCEMENT] UI: Add duration, labels, annotations to alerts page in the React UI. #7605
+* [ENHANCEMENT] UI: Add duration on the React UI rules page, hide annotation and labels if empty. #7606
+* [BUGFIX] API: Deduplicate series in /api/v1/series. #7862
+* [BUGFIX] PromQL: Drop metric name in bool comparison between two instant vectors. #7819
+* [BUGFIX] PromQL: Exit with an error when time parameters can't be parsed. #7505
+* [BUGFIX] Remote read: Re-add accidentally removed tracing for remote-read requests. #7916
+* [BUGFIX] Rules: Detect extra fields in rule files. #7767
+* [BUGFIX] Rules: Disallow overwriting the metric name in the `labels` section of recording rules. #7787
+* [BUGFIX] Rules: Keep evaluation timestamp across reloads. #7775
+* [BUGFIX] Scrape: Do not stop scrapes in progress during reload. #7752
+* [BUGFIX] TSDB: Fix `chunks.HeadReadWriter: maxt of the files are not set` error. #7856
+* [BUGFIX] TSDB: Delete blocks atomically to prevent corruption when there is a panic/crash during deletion. #7772
+* [BUGFIX] Triton SD: Fix a panic when triton_sd_config is nil. #7671
+* [BUGFIX] UI: Fix react UI bug with series going on and off. #7804
+* [BUGFIX] UI: Fix styling bug for target labels with special names in React UI. #7902
+* [BUGFIX] Web: Stop CMUX and GRPC servers even with stale connections, which previously prevented the server from stopping on SIGTERM. #7810
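
With composite durations (#7713, #7833), duration fields are no longer limited to a single unit, so ninety seconds can be written as `1m30s` in the configuration, and PromQL ranges such as `[1h30m]` parse the same way. A minimal configuration sketch:

```yaml
global:
  # Composite durations such as 1m30s are accepted from 2.21.0 on.
  scrape_interval: 1m30s
```
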
## 2.20.1 / 2020-08-05

-* [BUGFIX] SD: Reduce the Consul watch timeout to 2m and adjust the request timeout accordingly. [#7724](https://github.com/prometheus/prometheus/pull/7724)
+* [BUGFIX] SD: Reduce the Consul watch timeout to 2m and adjust the request timeout accordingly. #7724

## 2.20.0 / 2020-07-22

This release changes WAL compression from opt-in to default. WAL compression will prevent a downgrade to v2.10 or earlier without deleting the WAL. Disable WAL compression explicitly by setting the command line flag `--no-storage.tsdb.wal-compression` if you require downgrading to v2.10 or earlier.

-* [CHANGE] promtool: Changed rule numbering from 0-based to 1-based when reporting rule errors. [#7495](https://github.com/prometheus/prometheus/pull/7495)
* [CHANGE] Remote read: Added `prometheus_remote_storage_read_queries_total` counter and `prometheus_remote_storage_read_request_duration_seconds` histogram, removed `prometheus_remote_storage_remote_read_queries_total` counter.
* [CHANGE] Remote write: Added buckets for longer durations to `prometheus_remote_storage_sent_batch_duration_seconds` histogram.
-* [CHANGE] TSDB: WAL compression is enabled by default. [#7410](https://github.com/prometheus/prometheus/pull/7410)
-* [FEATURE] PromQL: Added `group()` aggregator. [#7480](https://github.com/prometheus/prometheus/pull/7480)
-* [FEATURE] SD: Added Docker Swarm SD. [#7420](https://github.com/prometheus/prometheus/pull/7420)
-* [FEATURE] SD: Added DigitalOcean SD. [#7407](https://github.com/prometheus/prometheus/pull/7407)
-* [FEATURE] SD: Added Openstack config option to query alternative endpoints. [#7494](https://github.com/prometheus/prometheus/pull/7494)
-* [ENHANCEMENT] Configuration: Exit early on invalid config file and signal it with exit code 2. [#7399](https://github.com/prometheus/prometheus/pull/7399)
-* [ENHANCEMENT] PromQL: `without` is now a valid metric identifier. [#7533](https://github.com/prometheus/prometheus/pull/7533)
-* [ENHANCEMENT] PromQL: Optimized regex label matching for literals within the pattern or as prefix/suffix. [#7453](https://github.com/prometheus/prometheus/pull/7453) [#7503](https://github.com/prometheus/prometheus/pull/7503)
-* [ENHANCEMENT] promtool: Added time range parameters for labels API in promtool. [#7463](https://github.com/prometheus/prometheus/pull/7463)
-* [ENHANCEMENT] Remote write: Include samples waiting in channel in pending samples metric. Log number of dropped samples on hard shutdown. [#7335](https://github.com/prometheus/prometheus/pull/7335)
-* [ENHANCEMENT] Scrape: Ingest synthetic scrape report metrics atomically with the corresponding scraped metrics. [#7562](https://github.com/prometheus/prometheus/pull/7562)
-* [ENHANCEMENT] SD: Reduce timeouts for Openstack SD. [#7507](https://github.com/prometheus/prometheus/pull/7507)
-* [ENHANCEMENT] SD: Use 10m timeout for Consul watches. [#7423](https://github.com/prometheus/prometheus/pull/7423)
-* [ENHANCEMENT] SD: Added AMI meta label for EC2 SD.
[#7386](https://github.com/prometheus/prometheus/pull/7386) -* [ENHANCEMENT] TSDB: Increment WAL corruption metric also on WAL corruption during checkpointing. [#7491](https://github.com/prometheus/prometheus/pull/7491) -* [ENHANCEMENT] TSDB: Improved query performance for high-cardinality labels. [#7448](https://github.com/prometheus/prometheus/pull/7448) -* [ENHANCEMENT] UI: Display dates as well as timestamps in status page. [#7544](https://github.com/prometheus/prometheus/pull/7544) -* [ENHANCEMENT] UI: Improved scrolling when following hash-fragment links. [#7456](https://github.com/prometheus/prometheus/pull/7456) -* [ENHANCEMENT] UI: React UI renders numbers in alerts in a more human-readable way. [#7426](https://github.com/prometheus/prometheus/pull/7426) -* [BUGFIX] API: Fixed error status code in the query API. [#7435](https://github.com/prometheus/prometheus/pull/7435) -* [BUGFIX] PromQL: Fixed `avg` and `avg_over_time` for NaN, Inf, and float64 overflows. [#7346](https://github.com/prometheus/prometheus/pull/7346) -* [BUGFIX] PromQL: Fixed off-by-one error in `histogram_quantile`. [#7393](https://github.com/prometheus/prometheus/pull/7393) -* [BUGFIX] promtool: Support extended durations in rules unit tests. [#6297](https://github.com/prometheus/prometheus/pull/6297) -* [BUGFIX] Scrape: Fix undercounting for `scrape_samples_post_metric_relabeling` in case of errors. [#7342](https://github.com/prometheus/prometheus/pull/7342) -* [BUGFIX] TSDB: Don't panic on WAL corruptions. [#7550](https://github.com/prometheus/prometheus/pull/7550) -* [BUGFIX] TSDB: Avoid leaving behind empty files in `chunks_head`, causing startup failures. [#7573](https://github.com/prometheus/prometheus/pull/7573) -* [BUGFIX] TSDB: Fixed race between compact (gc, populate) and head append causing unknown symbol error. [#7560](https://github.com/prometheus/prometheus/pull/7560) -* [BUGFIX] TSDB: Fixed unknown symbol error during head compaction. [#7526](https://github.com/prometheus/prometheus/pull/7526) -* [BUGFIX] TSDB: Fixed panic during TSDB metric registration. [#7501](https://github.com/prometheus/prometheus/pull/7501) -* [BUGFIX] TSDB: Fixed `--limit` command line flag in `tsdb` tool. [#7430](https://github.com/prometheus/prometheus/pull/7430) +* [CHANGE] TSDB: WAL compression is enabled by default. #7410 +* [FEATURE] PromQL: Added `group()` aggregator. #7480 +* [FEATURE] SD: Added Docker Swarm SD. #7420 +* [FEATURE] SD: Added DigitalOcean SD. #7407 +* [FEATURE] SD: Added Openstack config option to query alternative endpoints. #7494 +* [ENHANCEMENT] Configuration: Exit early on invalid config file and signal it with exit code 2. #7399 +* [ENHANCEMENT] PromQL: `without` is now a valid metric identifier. #7533 +* [ENHANCEMENT] PromQL: Optimized regex label matching for literals within the pattern or as prefix/suffix. #7453 #7503 +* [ENHANCEMENT] promtool: Added time range parameters for labels API in promtool. #7463 +* [ENHANCEMENT] Remote write: Include samples waiting in channel in pending samples metric. Log number of dropped samples on hard shutdown. #7335 +* [ENHANCEMENT] Scrape: Ingest synthetic scrape report metrics atomically with the corresponding scraped metrics. #7562 +* [ENHANCEMENT] SD: Reduce timeouts for Openstack SD. #7507 +* [ENHANCEMENT] SD: Use 10m timeout for Consul watches. #7423 +* [ENHANCEMENT] SD: Added AMI meta label for EC2 SD. #7386 +* [ENHANCEMENT] TSDB: Increment WAL corruption metric also on WAL corruption during checkpointing. 
#7491
+* [ENHANCEMENT] TSDB: Improved query performance for high-cardinality labels. #7448
+* [ENHANCEMENT] UI: Display dates as well as timestamps in status page. #7544
+* [ENHANCEMENT] UI: Improved scrolling when following hash-fragment links. #7456
+* [ENHANCEMENT] UI: React UI renders numbers in alerts in a more human-readable way. #7426
+* [BUGFIX] API: Fixed error status code in the query API. #7435
+* [BUGFIX] PromQL: Fixed `avg` and `avg_over_time` for NaN, Inf, and float64 overflows. #7346
+* [BUGFIX] PromQL: Fixed off-by-one error in `histogram_quantile`. #7393
+* [BUGFIX] promtool: Support extended durations in rules unit tests. #6297
+* [BUGFIX] Scrape: Fix undercounting for `scrape_samples_post_metric_relabeling` in case of errors. #7342
+* [BUGFIX] TSDB: Don't panic on WAL corruptions. #7550
+* [BUGFIX] TSDB: Avoid leaving behind empty files in `chunks_head`, causing startup failures. #7573
+* [BUGFIX] TSDB: Fixed race between compact (gc, populate) and head append causing unknown symbol error. #7560
+* [BUGFIX] TSDB: Fixed unknown symbol error during head compaction. #7526
+* [BUGFIX] TSDB: Fixed panic during TSDB metric registration. #7501
+* [BUGFIX] TSDB: Fixed `--limit` command line flag in `tsdb` tool. #7430
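
The `group()` aggregator added in this release (#7480) returns 1 for each group, which makes "count distinct label values" queries straightforward. Below is a hedged sketch as a recording rule; the rule name is a placeholder and `node_cpu_seconds_total` merely stands in for any metric with many series per instance:

```yaml
groups:
  - name: example
    rules:
      - record: instances:count
        # group() collapses all series sharing the "instance" label into one
        # series with value 1; count() then counts the distinct instances.
        expr: count(group by (instance) (node_cpu_seconds_total))
```
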
## 2.19.3 / 2020-07-24

-* [BUGFIX] TSDB: Don't panic on WAL corruptions. [#7550](https://github.com/prometheus/prometheus/pull/7550)
-* [BUGFIX] TSDB: Avoid leaving behind empty files in chunks_head, causing startup failures. [#7573](https://github.com/prometheus/prometheus/pull/7573)
+* [BUGFIX] TSDB: Don't panic on WAL corruptions. #7550
+* [BUGFIX] TSDB: Avoid leaving behind empty files in chunks_head, causing startup failures. #7573

## 2.19.2 / 2020-06-26

-* [BUGFIX] Remote Write: Fix panic when reloading config with modified queue parameters. [#7452](https://github.com/prometheus/prometheus/pull/7452)
+* [BUGFIX] Remote Write: Fix panic when reloading config with modified queue parameters. #7452

## 2.19.1 / 2020-06-18

-* [BUGFIX] TSDB: Fix m-map file truncation leading to unsequential files. [#7414](https://github.com/prometheus/prometheus/pull/7414)
+* [BUGFIX] TSDB: Fix m-map file truncation leading to unsequential files. #7414

## 2.19.0 / 2020-06-09

-* [FEATURE] TSDB: Memory-map full chunks of Head (in-memory) block from disk. This reduces memory footprint and makes restarts faster. [#6679](https://github.com/prometheus/prometheus/pull/6679)
-* [ENHANCEMENT] Discovery: Added discovery support for Triton global zones. [#7250](https://github.com/prometheus/prometheus/pull/7250)
-* [ENHANCEMENT] Increased alert resend delay to be more tolerant towards failures. [#7228](https://github.com/prometheus/prometheus/pull/7228)
-* [ENHANCEMENT] Remote Read: Added `prometheus_remote_storage_remote_read_queries_total` counter to count total number of remote read queries. [#7328](https://github.com/prometheus/prometheus/pull/7328)
-* [ENHANCEMEMT] Added time range parameters for label names and label values API. [#7288](https://github.com/prometheus/prometheus/pull/7288)
-* [ENHANCEMENT] TSDB: Reduced contention in isolation for high load. [#7332](https://github.com/prometheus/prometheus/pull/7332)
-* [BUGFIX] PromQL: Eliminated collision while checking for duplicate labels. [#7058](https://github.com/prometheus/prometheus/pull/7058)
-* [BUGFIX] React UI: Don't null out data when clicking on the current tab. [#7243](https://github.com/prometheus/prometheus/pull/7243)
-* [BUGFIX] PromQL: Correctly track number of samples for a query. [#7307](https://github.com/prometheus/prometheus/pull/7307)
-* [BUGFIX] PromQL: Return NaN when histogram buckets have 0 observations. [#7318](https://github.com/prometheus/prometheus/pull/7318)
+* [FEATURE] TSDB: Memory-map full chunks of Head (in-memory) block from disk. This reduces memory footprint and makes restarts faster. #6679
+* [ENHANCEMENT] Discovery: Added discovery support for Triton global zones. #7250
+* [ENHANCEMENT] Increased alert resend delay to be more tolerant towards failures. #7228
+* [ENHANCEMENT] Remote Read: Added `prometheus_remote_storage_remote_read_queries_total` counter to count total number of remote read queries. #7328
+* [ENHANCEMENT] Added time range parameters for label names and label values API. #7288
+* [ENHANCEMENT] TSDB: Reduced contention in isolation for high load. #7332
+* [BUGFIX] PromQL: Eliminated collision while checking for duplicate labels. #7058
+* [BUGFIX] React UI: Don't null out data when clicking on the current tab. #7243
+* [BUGFIX] PromQL: Correctly track number of samples for a query. #7307
+* [BUGFIX] PromQL: Return NaN when histogram buckets have 0 observations. #7318

## 2.18.2 / 2020-06-09

-* [BUGFIX] TSDB: Fix incorrect query results when using Prometheus with remote reads configured [#7361](https://github.com/prometheus/prometheus/pull/7361)
+* [BUGFIX] TSDB: Fix incorrect query results when using Prometheus with remote reads configured #7361

## 2.18.1 / 2020-05-07

-* [BUGFIX] TSDB: Fixed snapshot API. [#7217](https://github.com/prometheus/prometheus/pull/7217)
+* [BUGFIX] TSDB: Fixed snapshot API. #7217

## 2.18.0 / 2020-05-05

-* [CHANGE] Federation: Only use local TSDB for federation (ignore remote read). [#7096](https://github.com/prometheus/prometheus/pull/7096)
-* [CHANGE] Rules: `rule_evaluations_total` and `rule_evaluation_failures_total` have a `rule_group` label now. [#7094](https://github.com/prometheus/prometheus/pull/7094)
-* [FEATURE] Tracing: Added experimental Jaeger support [#7148](https://github.com/prometheus/prometheus/pull/7148)
-* [ENHANCEMENT] TSDB: Significantly reduce WAL size kept around after a block cut. [#7098](https://github.com/prometheus/prometheus/pull/7098)
-* [ENHANCEMENT] Discovery: Add `architecture` meta label for EC2. [#7000](https://github.com/prometheus/prometheus/pull/7000)
-* [BUGFIX] UI: Fixed wrong MinTime reported by /status. [#7182](https://github.com/prometheus/prometheus/pull/7182)
-* [BUGFIX] React UI: Fixed multiselect legend on OSX. [#6880](https://github.com/prometheus/prometheus/pull/6880)
-* [BUGFIX] Remote Write: Fixed blocked resharding edge case. [#7122](https://github.com/prometheus/prometheus/pull/7122)
-* [BUGFIX] Remote Write: Fixed remote write not updating on relabel configs change. [#7073](https://github.com/prometheus/prometheus/pull/7073)
+* [CHANGE] Federation: Only use local TSDB for federation (ignore remote read). #7096
+* [CHANGE] Rules: `rule_evaluations_total` and `rule_evaluation_failures_total` have a `rule_group` label now. #7094
+* [FEATURE] Tracing: Added experimental Jaeger support #7148
+* [ENHANCEMENT] TSDB: Significantly reduce WAL size kept around after a block cut. #7098
+* [ENHANCEMENT] Discovery: Add `architecture` meta label for EC2. #7000
+* [BUGFIX] UI: Fixed wrong MinTime reported by /status. #7182
+* [BUGFIX] React UI: Fixed multiselect legend on OSX. #6880
+* [BUGFIX] Remote Write: Fixed blocked resharding edge case. #7122
+* [BUGFIX] Remote Write: Fixed remote write not updating on relabel configs change.
#7073 ## 2.17.2 / 2020-04-20 -* [BUGFIX] Federation: Register federation metrics [#7081](https://github.com/prometheus/prometheus/pull/7081) -* [BUGFIX] PromQL: Fix panic in parser error handling [#7132](https://github.com/prometheus/prometheus/pull/7132) -* [BUGFIX] Rules: Fix reloads hanging when deleting a rule group that is being evaluated [#7138](https://github.com/prometheus/prometheus/pull/7138) -* [BUGFIX] TSDB: Fix a memory leak when prometheus starts with an empty TSDB WAL [#7135](https://github.com/prometheus/prometheus/pull/7135) -* [BUGFIX] TSDB: Make isolation more robust to panics in web handlers [#7129](https://github.com/prometheus/prometheus/pull/7129) [#7136](https://github.com/prometheus/prometheus/pull/7136) +* [BUGFIX] Federation: Register federation metrics #7081 +* [BUGFIX] PromQL: Fix panic in parser error handling #7132 +* [BUGFIX] Rules: Fix reloads hanging when deleting a rule group that is being evaluated #7138 +* [BUGFIX] TSDB: Fix a memory leak when prometheus starts with an empty TSDB WAL #7135 +* [BUGFIX] TSDB: Make isolation more robust to panics in web handlers #7129 #7136 ## 2.17.1 / 2020-03-26 -* [BUGFIX] TSDB: Fix query performance regression that increased memory and CPU usage [#7051](https://github.com/prometheus/prometheus/pull/7051) +* [BUGFIX] TSDB: Fix query performance regression that increased memory and CPU usage #7051 ## 2.17.0 / 2020-03-24 @@ -291,218 +291,218 @@ guaranteed to only see full scrapes and full recording rules. This comes with a certain overhead in resource usage. Depending on the situation, there might be some increase in memory usage, CPU usage, or query latency. -* [FEATURE] TSDB: Support isolation [#6841](https://github.com/prometheus/prometheus/pull/6841) -* [ENHANCEMENT] PromQL: Allow more keywords as metric names [#6933](https://github.com/prometheus/prometheus/pull/6933) -* [ENHANCEMENT] React UI: Add normalization of localhost URLs in targets page [#6794](https://github.com/prometheus/prometheus/pull/6794) -* [ENHANCEMENT] Remote read: Read from remote storage concurrently [#6770](https://github.com/prometheus/prometheus/pull/6770) -* [ENHANCEMENT] Rules: Mark deleted rule series as stale after a reload [#6745](https://github.com/prometheus/prometheus/pull/6745) -* [ENHANCEMENT] Scrape: Log scrape append failures as debug rather than warn [#6852](https://github.com/prometheus/prometheus/pull/6852) -* [ENHANCEMENT] TSDB: Improve query performance for queries that partially hit the head [#6676](https://github.com/prometheus/prometheus/pull/6676) -* [ENHANCEMENT] Consul SD: Expose service health as meta label [#5313](https://github.com/prometheus/prometheus/pull/5313) -* [ENHANCEMENT] EC2 SD: Expose EC2 instance lifecycle as meta label [#6914](https://github.com/prometheus/prometheus/pull/6914) -* [ENHANCEMENT] Kubernetes SD: Expose service type as meta label for K8s service role [#6684](https://github.com/prometheus/prometheus/pull/6684) -* [ENHANCEMENT] Kubernetes SD: Expose label_selector and field_selector [#6807](https://github.com/prometheus/prometheus/pull/6807) -* [ENHANCEMENT] Openstack SD: Expose hypervisor id as meta label [#6962](https://github.com/prometheus/prometheus/pull/6962) -* [BUGFIX] PromQL: Do not escape HTML-like chars in query log [#6834](https://github.com/prometheus/prometheus/pull/6834) [#6795](https://github.com/prometheus/prometheus/pull/6795) -* [BUGFIX] React UI: Fix data table matrix values [#6896](https://github.com/prometheus/prometheus/pull/6896) -* [BUGFIX] React UI: Fix new targets page 
not loading when using non-ASCII characters [#6892](https://github.com/prometheus/prometheus/pull/6892)
-* [BUGFIX] Remote read: Fix duplication of metrics read from remote storage with external labels [#6967](https://github.com/prometheus/prometheus/pull/6967) [#7018](https://github.com/prometheus/prometheus/pull/7018)
-* [BUGFIX] Remote write: Register WAL watcher and live reader metrics for all remotes, not just the first one [#6998](https://github.com/prometheus/prometheus/pull/6998)
-* [BUGFIX] Scrape: Prevent removal of metric names upon relabeling [#6891](https://github.com/prometheus/prometheus/pull/6891)
-* [BUGFIX] Scrape: Fix 'superfluous response.WriteHeader call' errors when scrape fails under some circonstances [#6986](https://github.com/prometheus/prometheus/pull/6986)
-* [BUGFIX] Scrape: Fix crash when reloads are separated by two scrape intervals [#7011](https://github.com/prometheus/prometheus/pull/7011)
+* [FEATURE] TSDB: Support isolation #6841
+* [ENHANCEMENT] PromQL: Allow more keywords as metric names #6933
+* [ENHANCEMENT] React UI: Add normalization of localhost URLs in targets page #6794
+* [ENHANCEMENT] Remote read: Read from remote storage concurrently #6770
+* [ENHANCEMENT] Rules: Mark deleted rule series as stale after a reload #6745
+* [ENHANCEMENT] Scrape: Log scrape append failures as debug rather than warn #6852
+* [ENHANCEMENT] TSDB: Improve query performance for queries that partially hit the head #6676
+* [ENHANCEMENT] Consul SD: Expose service health as meta label #5313
+* [ENHANCEMENT] EC2 SD: Expose EC2 instance lifecycle as meta label #6914
+* [ENHANCEMENT] Kubernetes SD: Expose service type as meta label for K8s service role #6684
+* [ENHANCEMENT] Kubernetes SD: Expose label_selector and field_selector #6807
+* [ENHANCEMENT] Openstack SD: Expose hypervisor id as meta label #6962
+* [BUGFIX] PromQL: Do not escape HTML-like chars in query log #6834 #6795
+* [BUGFIX] React UI: Fix data table matrix values #6896
+* [BUGFIX] React UI: Fix new targets page not loading when using non-ASCII characters #6892
+* [BUGFIX] Remote read: Fix duplication of metrics read from remote storage with external labels #6967 #7018
+* [BUGFIX] Remote write: Register WAL watcher and live reader metrics for all remotes, not just the first one #6998
+* [BUGFIX] Scrape: Prevent removal of metric names upon relabeling #6891
+* [BUGFIX] Scrape: Fix 'superfluous response.WriteHeader call' errors when scrape fails under some circumstances #6986
+* [BUGFIX] Scrape: Fix crash when reloads are separated by two scrape intervals #7011

## 2.16.0 / 2020-02-13

-* [FEATURE] React UI: Support local timezone on /graph [#6692](https://github.com/prometheus/prometheus/pull/6692)
-* [FEATURE] PromQL: add absent_over_time query function [#6490](https://github.com/prometheus/prometheus/pull/6490)
-* [FEATURE] Adding optional logging of queries to their own file [#6520](https://github.com/prometheus/prometheus/pull/6520)
-* [ENHANCEMENT] React UI: Add support for rules page and "Xs ago" duration displays [#6503](https://github.com/prometheus/prometheus/pull/6503)
-* [ENHANCEMENT] React UI: alerts page, replace filtering togglers tabs with checkboxes [#6543](https://github.com/prometheus/prometheus/pull/6543)
-* [ENHANCEMENT] TSDB: Export metric for WAL write errors [#6647](https://github.com/prometheus/prometheus/pull/6647)
-* [ENHANCEMENT] TSDB: Improve query performance for queries that only touch the most recent 2h of data.
[#6651](https://github.com/prometheus/prometheus/pull/6651) -* [ENHANCEMENT] PromQL: Refactoring in parser errors to improve error messages [#6634](https://github.com/prometheus/prometheus/pull/6634) -* [ENHANCEMENT] PromQL: Support trailing commas in grouping opts [#6480](https://github.com/prometheus/prometheus/pull/6480) -* [ENHANCEMENT] Scrape: Reduce memory usage on reloads by reusing scrape cache [#6670](https://github.com/prometheus/prometheus/pull/6670) -* [ENHANCEMENT] Scrape: Add metrics to track bytes and entries in the metadata cache [#6675](https://github.com/prometheus/prometheus/pull/6675) -* [ENHANCEMENT] promtool: Add support for line-column numbers for invalid rules output [#6533](https://github.com/prometheus/prometheus/pull/6533) -* [ENHANCEMENT] Avoid restarting rule groups when it is unnecessary [#6450](https://github.com/prometheus/prometheus/pull/6450) -* [BUGFIX] React UI: Send cookies on fetch() on older browsers [#6553](https://github.com/prometheus/prometheus/pull/6553) -* [BUGFIX] React UI: adopt grafana flot fix for stacked graphs [#6603](https://github.com/prometheus/prometheus/pull/6603) -* [BUFGIX] React UI: broken graph page browser history so that back button works as expected [#6659](https://github.com/prometheus/prometheus/pull/6659) -* [BUGFIX] TSDB: ensure compactionsSkipped metric is registered, and log proper error if one is returned from head.Init [#6616](https://github.com/prometheus/prometheus/pull/6616) -* [BUGFIX] TSDB: return an error on ingesting series with duplicate labels [#6664](https://github.com/prometheus/prometheus/pull/6664) -* [BUGFIX] PromQL: Fix unary operator precedence [#6579](https://github.com/prometheus/prometheus/pull/6579) -* [BUGFIX] PromQL: Respect query.timeout even when we reach query.max-concurrency [#6712](https://github.com/prometheus/prometheus/pull/6712) -* [BUGFIX] PromQL: Fix string and parentheses handling in engine, which affected React UI [#6612](https://github.com/prometheus/prometheus/pull/6612) -* [BUGFIX] PromQL: Remove output labels returned by absent() if they are produced by multiple identical label matchers [#6493](https://github.com/prometheus/prometheus/pull/6493) -* [BUGFIX] Scrape: Validate that OpenMetrics input ends with `# EOF` [#6505](https://github.com/prometheus/prometheus/pull/6505) -* [BUGFIX] Remote read: return the correct error if configs can't be marshal'd to JSON [#6622](https://github.com/prometheus/prometheus/pull/6622) -* [BUGFIX] Remote write: Make remote client `Store` use passed context, which can affect shutdown timing [#6673](https://github.com/prometheus/prometheus/pull/6673) -* [BUGFIX] Remote write: Improve sharding calculation in cases where we would always be consistently behind by tracking pendingSamples [#6511](https://github.com/prometheus/prometheus/pull/6511) -* [BUGFIX] Ensure prometheus_rule_group metrics are deleted when a rule group is removed [#6693](https://github.com/prometheus/prometheus/pull/6693) +* [FEATURE] React UI: Support local timezone on /graph #6692 +* [FEATURE] PromQL: add absent_over_time query function #6490 +* [FEATURE] Adding optional logging of queries to their own file #6520 +* [ENHANCEMENT] React UI: Add support for rules page and "Xs ago" duration displays #6503 +* [ENHANCEMENT] React UI: alerts page, replace filtering togglers tabs with checkboxes #6543 +* [ENHANCEMENT] TSDB: Export metric for WAL write errors #6647 +* [ENHANCEMENT] TSDB: Improve query performance for queries that only touch the most recent 2h of data. 
#6651
+* [ENHANCEMENT] PromQL: Refactoring in parser errors to improve error messages #6634
+* [ENHANCEMENT] PromQL: Support trailing commas in grouping opts #6480
+* [ENHANCEMENT] Scrape: Reduce memory usage on reloads by reusing scrape cache #6670
+* [ENHANCEMENT] Scrape: Add metrics to track bytes and entries in the metadata cache #6675
+* [ENHANCEMENT] promtool: Add support for line-column numbers for invalid rules output #6533
+* [ENHANCEMENT] Avoid restarting rule groups when it is unnecessary #6450
+* [BUGFIX] React UI: Send cookies on fetch() on older browsers #6553
+* [BUGFIX] React UI: adopt grafana flot fix for stacked graphs #6603
+* [BUGFIX] React UI: Fix broken graph page browser history so that the back button works as expected #6659
+* [BUGFIX] TSDB: ensure compactionsSkipped metric is registered, and log proper error if one is returned from head.Init #6616
+* [BUGFIX] TSDB: return an error on ingesting series with duplicate labels #6664
+* [BUGFIX] PromQL: Fix unary operator precedence #6579
+* [BUGFIX] PromQL: Respect query.timeout even when we reach query.max-concurrency #6712
+* [BUGFIX] PromQL: Fix string and parentheses handling in engine, which affected React UI #6612
+* [BUGFIX] PromQL: Remove output labels returned by absent() if they are produced by multiple identical label matchers #6493
+* [BUGFIX] Scrape: Validate that OpenMetrics input ends with `# EOF` #6505
+* [BUGFIX] Remote read: return the correct error if configs can't be marshalled to JSON #6622
+* [BUGFIX] Remote write: Make remote client `Store` use passed context, which can affect shutdown timing #6673
+* [BUGFIX] Remote write: Improve sharding calculation in cases where we would always be consistently behind by tracking pendingSamples #6511
+* [BUGFIX] Ensure prometheus_rule_group metrics are deleted when a rule group is removed #6693

## 2.15.2 / 2020-01-06

-* [BUGFIX] TSDB: Fixed support for TSDB blocks built with Prometheus before 2.1.0. [#6564](https://github.com/prometheus/prometheus/pull/6564)
-* [BUGFIX] TSDB: Fixed block compaction issues on Windows. [#6547](https://github.com/prometheus/prometheus/pull/6547)
+* [BUGFIX] TSDB: Fixed support for TSDB blocks built with Prometheus before 2.1.0. #6564
+* [BUGFIX] TSDB: Fixed block compaction issues on Windows. #6547

## 2.15.1 / 2019-12-25

-* [BUGFIX] TSDB: Fixed race on concurrent queries against same data. [#6512](https://github.com/prometheus/prometheus/pull/6512)
+* [BUGFIX] TSDB: Fixed race on concurrent queries against same data. #6512

## 2.15.0 / 2019-12-23

-* [CHANGE] Discovery: Removed `prometheus_sd_kubernetes_cache_*` metrics. Additionally `prometheus_sd_kubernetes_workqueue_latency_seconds` and `prometheus_sd_kubernetes_workqueue_work_duration_seconds` metrics now show correct values in seconds. [#6393](https://github.com/prometheus/prometheus/pull/6393)
-* [CHANGE] Remote write: Changed `query` label on `prometheus_remote_storage_*` metrics to `remote_name` and `url`. [#6043](https://github.com/prometheus/prometheus/pull/6043)
-* [FEATURE] API: Added new endpoint for exposing per metric metadata `/metadata`. [#6420](https://github.com/prometheus/prometheus/pull/6420) [#6442](https://github.com/prometheus/prometheus/pull/6442)
-* [ENHANCEMENT] TSDB: Significantly reduced memory footprint of loaded TSDB blocks.
[#6418](https://github.com/prometheus/prometheus/pull/6418) [#6461](https://github.com/prometheus/prometheus/pull/6461) -* [ENHANCEMENT] TSDB: Significantly optimized what we buffer during compaction which should result in lower memory footprint during compaction. [#6422](https://github.com/prometheus/prometheus/pull/6422) [#6452](https://github.com/prometheus/prometheus/pull/6452) [#6468](https://github.com/prometheus/prometheus/pull/6468) [#6475](https://github.com/prometheus/prometheus/pull/6475) -* [ENHANCEMENT] TSDB: Improve replay latency. [#6230](https://github.com/prometheus/prometheus/pull/6230) -* [ENHANCEMENT] TSDB: WAL size is now used for size based retention calculation. [#5886](https://github.com/prometheus/prometheus/pull/5886) -* [ENHANCEMENT] Remote read: Added query grouping and range hints to the remote read request [#6401](https://github.com/prometheus/prometheus/pull/6401) -* [ENHANCEMENT] Remote write: Added `prometheus_remote_storage_sent_bytes_total` counter per queue. [#6344](https://github.com/prometheus/prometheus/pull/6344) -* [ENHANCEMENT] promql: Improved PromQL parser performance. [#6356](https://github.com/prometheus/prometheus/pull/6356) -* [ENHANCEMENT] React UI: Implemented missing pages like `/targets` [#6276](https://github.com/prometheus/prometheus/pull/6276), TSDB status page [#6281](https://github.com/prometheus/prometheus/pull/6281) [#6267](https://github.com/prometheus/prometheus/pull/6267) and many other fixes and performance improvements. -* [ENHANCEMENT] promql: Prometheus now accepts spaces between time range and square bracket. e.g `[ 5m]` [#6065](https://github.com/prometheus/prometheus/pull/6065) -* [BUGFIX] Config: Fixed alertmanager configuration to not miss targets when configurations are similar. [#6455](https://github.com/prometheus/prometheus/pull/6455) -* [BUGFIX] Remote write: Value of `prometheus_remote_storage_shards_desired` gauge shows raw value of desired shards and it's updated correctly. [#6378](https://github.com/prometheus/prometheus/pull/6378) -* [BUGFIX] Rules: Prometheus now fails the evaluation of rules and alerts where metric results collide with labels specified in `labels` field. [#6469](https://github.com/prometheus/prometheus/pull/6469) -* [BUGFIX] API: Targets Metadata API `/targets/metadata` now accepts empty `match_targets` parameter as in the spec. [#6303](https://github.com/prometheus/prometheus/pull/6303) +* [CHANGE] Discovery: Removed `prometheus_sd_kubernetes_cache_*` metrics. Additionally `prometheus_sd_kubernetes_workqueue_latency_seconds` and `prometheus_sd_kubernetes_workqueue_work_duration_seconds` metrics now show correct values in seconds. #6393 +* [CHANGE] Remote write: Changed `query` label on `prometheus_remote_storage_*` metrics to `remote_name` and `url`. #6043 +* [FEATURE] API: Added new endpoint for exposing per metric metadata `/metadata`. #6420 #6442 +* [ENHANCEMENT] TSDB: Significantly reduced memory footprint of loaded TSDB blocks. #6418 #6461 +* [ENHANCEMENT] TSDB: Significantly optimized what we buffer during compaction which should result in lower memory footprint during compaction. #6422 #6452 #6468 #6475 +* [ENHANCEMENT] TSDB: Improve replay latency. #6230 +* [ENHANCEMENT] TSDB: WAL size is now used for size based retention calculation. #5886 +* [ENHANCEMENT] Remote read: Added query grouping and range hints to the remote read request #6401 +* [ENHANCEMENT] Remote write: Added `prometheus_remote_storage_sent_bytes_total` counter per queue. 
#6344
+* [ENHANCEMENT] promql: Improved PromQL parser performance. #6356
+* [ENHANCEMENT] React UI: Implemented missing pages like `/targets` #6276, TSDB status page #6281 #6267 and many other fixes and performance improvements.
+* [ENHANCEMENT] promql: Prometheus now accepts spaces between time range and square bracket. e.g. `[ 5m]` #6065
+* [BUGFIX] Config: Fixed alertmanager configuration to not miss targets when configurations are similar. #6455
+* [BUGFIX] Remote write: Value of `prometheus_remote_storage_shards_desired` gauge shows raw value of desired shards and it's updated correctly. #6378
+* [BUGFIX] Rules: Prometheus now fails the evaluation of rules and alerts where metric results collide with labels specified in `labels` field. #6469
+* [BUGFIX] API: Targets Metadata API `/targets/metadata` now accepts empty `match_targets` parameter as in the spec. #6303

## 2.14.0 / 2019-11-11

-* [SECURITY/BUGFIX] UI: Ensure warnings from the API are escaped. [#6279](https://github.com/prometheus/prometheus/pull/6279)
-* [FEATURE] API: `/api/v1/status/runtimeinfo` and `/api/v1/status/buildinfo` endpoints added for use by the React UI. [#6243](https://github.com/prometheus/prometheus/pull/6243)
-* [FEATURE] React UI: implement the new experimental React based UI. [#5694](https://github.com/prometheus/prometheus/pull/5694) and many more
+* [SECURITY/BUGFIX] UI: Ensure warnings from the API are escaped. #6279
+* [FEATURE] API: `/api/v1/status/runtimeinfo` and `/api/v1/status/buildinfo` endpoints added for use by the React UI. #6243
+* [FEATURE] React UI: implement the new experimental React based UI. #5694 and many more
 * Can be found under `/new`.
 * Not all pages are implemented yet.
-* [FEATURE] Status: Cardinality statistics added to the Runtime & Build Information page. [#6125](https://github.com/prometheus/prometheus/pull/6125)
-* [ENHANCEMENT/BUGFIX] Remote write: fix delays in remote write after a compaction. [#6021](https://github.com/prometheus/prometheus/pull/6021)
-* [ENHANCEMENT] UI: Alerts can be filtered by state. [#5758](https://github.com/prometheus/prometheus/pull/5758)
-* [BUGFIX] API: lifecycle endpoints return 403 when not enabled. [#6057](https://github.com/prometheus/prometheus/pull/6057)
-* [BUGFIX] Build: Fix Solaris build. [#6149](https://github.com/prometheus/prometheus/pull/6149)
-* [BUGFIX] Promtool: Remove false duplicate rule warnings when checking rule files with alerts. [#6270](https://github.com/prometheus/prometheus/pull/6270)
-* [BUGFIX] Remote write: restore use of deduplicating logger in remote write. [#6113](https://github.com/prometheus/prometheus/pull/6113)
-* [BUGFIX] Remote write: do not reshard when unable to send samples. [#6111](https://github.com/prometheus/prometheus/pull/6111)
-* [BUGFIX] Service discovery: errors are no longer logged on context cancellation. [#6116](https://github.com/prometheus/prometheus/pull/6116), [#6133](https://github.com/prometheus/prometheus/pull/6133)
-* [BUGFIX] UI: handle null response from API properly. [#6071](https://github.com/prometheus/prometheus/pull/6071)
+* [FEATURE] Status: Cardinality statistics added to the Runtime & Build Information page. #6125
+* [ENHANCEMENT/BUGFIX] Remote write: fix delays in remote write after a compaction. #6021
+* [ENHANCEMENT] UI: Alerts can be filtered by state. #5758
+* [BUGFIX] API: lifecycle endpoints return 403 when not enabled. #6057
+* [BUGFIX] Build: Fix Solaris build. #6149
+* [BUGFIX] Promtool: Remove false duplicate rule warnings when checking rule files with alerts.
#6270
+* [BUGFIX] Remote write: restore use of deduplicating logger in remote write. #6113
+* [BUGFIX] Remote write: do not reshard when unable to send samples. #6111
+* [BUGFIX] Service discovery: errors are no longer logged on context cancellation. #6116, #6133
+* [BUGFIX] UI: handle null response from API properly. #6071

## 2.13.1 / 2019-10-16

-* [BUGFIX] Fix panic in ARM builds of Prometheus. [#6110](https://github.com/prometheus/prometheus/pull/6110)
-* [BUGFIX] promql: fix potential panic in the query logger. [#6094](https://github.com/prometheus/prometheus/pull/6094)
-* [BUGFIX] Multiple errors of http: superfluous response.WriteHeader call in the logs. [#6145](https://github.com/prometheus/prometheus/pull/6145)
+* [BUGFIX] Fix panic in ARM builds of Prometheus. #6110
+* [BUGFIX] promql: fix potential panic in the query logger. #6094
+* [BUGFIX] Fix multiple "http: superfluous response.WriteHeader call" errors in the logs. #6145

## 2.13.0 / 2019-10-04

-* [SECURITY/BUGFIX] UI: Fix a Stored DOM XSS vulnerability with query history [CVE-2019-10215](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-10215). [#6098](https://github.com/prometheus/prometheus/pull/6098)
-* [CHANGE] Metrics: renamed prometheus_sd_configs_failed_total to prometheus_sd_failed_configs and changed to Gauge [#5254](https://github.com/prometheus/prometheus/pull/5254)
-* [ENHANCEMENT] Include the tsdb tool in builds. [#6089](https://github.com/prometheus/prometheus/pull/6089)
-* [ENHANCEMENT] Service discovery: add new node address types for kubernetes. [#5902](https://github.com/prometheus/prometheus/pull/5902)
-* [ENHANCEMENT] UI: show warnings if query have returned some warnings. [#5964](https://github.com/prometheus/prometheus/pull/5964)
-* [ENHANCEMENT] Remote write: reduce memory usage of the series cache. [#5849](https://github.com/prometheus/prometheus/pull/5849)
-* [ENHANCEMENT] Remote read: use remote read streaming to reduce memory usage. [#5703](https://github.com/prometheus/prometheus/pull/5703)
-* [ENHANCEMENT] Metrics: added metrics for remote write max/min/desired shards to queue manager. [#5787](https://github.com/prometheus/prometheus/pull/5787)
-* [ENHANCEMENT] Promtool: show the warnings during label query. [#5924](https://github.com/prometheus/prometheus/pull/5924)
-* [ENHANCEMENT] Promtool: improve error messages when parsing bad rules. [#5965](https://github.com/prometheus/prometheus/pull/5965)
-* [ENHANCEMENT] Promtool: more promlint rules. [#5515](https://github.com/prometheus/prometheus/pull/5515)
-* [BUGFIX] Promtool: fix recording inconsistency due to duplicate labels. [#6026](https://github.com/prometheus/prometheus/pull/6026)
-* [BUGFIX] UI: fixes service-discovery view when accessed from unhealthy targets. [#5915](https://github.com/prometheus/prometheus/pull/5915)
-* [BUGFIX] Metrics format: OpenMetrics parser crashes on short input. [#5939](https://github.com/prometheus/prometheus/pull/5939)
-* [BUGFIX] UI: avoid truncated Y-axis values. [#6014](https://github.com/prometheus/prometheus/pull/6014)
+* [SECURITY/BUGFIX] UI: Fix a Stored DOM XSS vulnerability with query history [CVE-2019-10215](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-10215). #6098
+* [CHANGE] Metrics: renamed prometheus_sd_configs_failed_total to prometheus_sd_failed_configs and changed to Gauge #5254
+* [ENHANCEMENT] Include the tsdb tool in builds. #6089
+* [ENHANCEMENT] Service discovery: add new node address types for kubernetes.
#5902
+* [ENHANCEMENT] UI: show warnings if the query returned any warnings. #5964
+* [ENHANCEMENT] Remote write: reduce memory usage of the series cache. #5849
+* [ENHANCEMENT] Remote read: use remote read streaming to reduce memory usage. #5703
+* [ENHANCEMENT] Metrics: added metrics for remote write max/min/desired shards to queue manager. #5787
+* [ENHANCEMENT] Promtool: show the warnings during label query. #5924
+* [ENHANCEMENT] Promtool: improve error messages when parsing bad rules. #5965
+* [ENHANCEMENT] Promtool: more promlint rules. #5515
+* [BUGFIX] Promtool: fix recording inconsistency due to duplicate labels. #6026
+* [BUGFIX] UI: fix the service-discovery view when accessed from unhealthy targets. #5915
+* [BUGFIX] Metrics format: fix OpenMetrics parser crashes on short input. #5939
+* [BUGFIX] UI: avoid truncated Y-axis values. #6014

## 2.12.0 / 2019-08-17

-* [FEATURE] Track currently active PromQL queries in a log file. [#5794](https://github.com/prometheus/prometheus/pull/5794)
-* [FEATURE] Enable and provide binaries for `mips64` / `mips64le` architectures. [#5792](https://github.com/prometheus/prometheus/pull/5792)
-* [ENHANCEMENT] Improve responsiveness of targets web UI and API endpoint. [#5740](https://github.com/prometheus/prometheus/pull/5740)
-* [ENHANCEMENT] Improve remote write desired shards calculation. [#5763](https://github.com/prometheus/prometheus/pull/5763)
-* [ENHANCEMENT] Flush TSDB pages more precisely. [tsdb#660](https://github.com/prometheus-junkyard/tsdb/pull/660)
-* [ENHANCEMENT] Add `prometheus_tsdb_retention_limit_bytes` metric. [tsdb#667](https://github.com/prometheus-junkyard/tsdb/pull/667)
-* [ENHANCEMENT] Add logging during TSDB WAL replay on startup. [tsdb#662](https://github.com/prometheus-junkyard/tsdb/pull/662)
-* [ENHANCEMENT] Improve TSDB memory usage. [tsdb#653](https://github.com/prometheus-junkyard/tsdb/pull/653), [tsdb#643](https://github.com/prometheus-junkyard/tsdb/pull/643), [tsdb#654](https://github.com/prometheus-junkyard/tsdb/pull/654), [tsdb#642](https://github.com/prometheus-junkyard/tsdb/pull/642), [tsdb#627](https://github.com/prometheus-junkyard/tsdb/pull/627)
-* [BUGFIX] Check for duplicate label names in remote read. [#5829](https://github.com/prometheus/prometheus/pull/5829)
-* [BUGFIX] Mark deleted rules' series as stale on next evaluation. [#5759](https://github.com/prometheus/prometheus/pull/5759)
-* [BUGFIX] Fix JavaScript error when showing warning about out-of-sync server time. [#5833](https://github.com/prometheus/prometheus/pull/5833)
-* [BUGFIX] Fix `promtool test rules` panic when providing empty `exp_labels`. [#5774](https://github.com/prometheus/prometheus/pull/5774)
-* [BUGFIX] Only check last directory when discovering checkpoint number. [#5756](https://github.com/prometheus/prometheus/pull/5756)
-* [BUGFIX] Fix error propagation in WAL watcher helper functions. [#5741](https://github.com/prometheus/prometheus/pull/5741)
-* [BUGFIX] Correctly handle empty labels from alert templates. [#5845](https://github.com/prometheus/prometheus/pull/5845)
+* [FEATURE] Track currently active PromQL queries in a log file. #5794
+* [FEATURE] Enable and provide binaries for `mips64` / `mips64le` architectures. #5792
+* [ENHANCEMENT] Improve responsiveness of targets web UI and API endpoint. #5740
+* [ENHANCEMENT] Improve remote write desired shards calculation. #5763
+* [ENHANCEMENT] Flush TSDB pages more precisely. tsdb#660
+* [ENHANCEMENT] Add `prometheus_tsdb_retention_limit_bytes` metric.
tsdb#667 +* [ENHANCEMENT] Add logging during TSDB WAL replay on startup. tsdb#662 +* [ENHANCEMENT] Improve TSDB memory usage. tsdb#653, tsdb#643, tsdb#654, tsdb#642, tsdb#627 +* [BUGFIX] Check for duplicate label names in remote read. #5829 +* [BUGFIX] Mark deleted rules' series as stale on next evaluation. #5759 +* [BUGFIX] Fix JavaScript error when showing warning about out-of-sync server time. #5833 +* [BUGFIX] Fix `promtool test rules` panic when providing empty `exp_labels`. #5774 +* [BUGFIX] Only check last directory when discovering checkpoint number. #5756 +* [BUGFIX] Fix error propagation in WAL watcher helper functions. #5741 +* [BUGFIX] Correctly handle empty labels from alert templates. #5845 ## 2.11.2 / 2019-08-14 -* [BUGFIX/SECURITY] Fix a Stored DOM XSS vulnerability with query history. [#5888](https://github.com/prometheus/prometheus/pull/5888) +* [BUGFIX/SECURITY] Fix a Stored DOM XSS vulnerability with query history. #5888 ## 2.11.1 / 2019-07-10 -* [BUGFIX] Fix potential panic when prometheus is watching multiple zookeeper paths. [#5749](https://github.com/prometheus/prometheus/pull/5749) +* [BUGFIX] Fix potential panic when prometheus is watching multiple zookeeper paths. #5749 ## 2.11.0 / 2019-07-09 -* [CHANGE] Remove `max_retries` from queue_config (it has been unused since rewriting remote-write to utilize the write-ahead-log). [#5649](https://github.com/prometheus/prometheus/pull/5649) -* [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before. [tsdb#637](https://github.com/prometheus-junkyard/tsdb/pull/637) -* [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`. [tsdb#622](https://github.com/prometheus-junkyard/tsdb/pull/622) -* [FEATURE] Add option to use Alertmanager API v2. [#5482](https://github.com/prometheus/prometheus/pull/5482) -* [FEATURE] Added `humanizePercentage` function for templates. [#5670](https://github.com/prometheus/prometheus/pull/5670) -* [FEATURE] Include InitContainers in Kubernetes Service Discovery. [#5598](https://github.com/prometheus/prometheus/pull/5598) -* [FEATURE] Provide option to compress WAL records using Snappy. [tsdb#609](https://github.com/prometheus-junkyard/tsdb/pull/609) -* [ENHANCEMENT] Create new clean segment when starting the WAL. [tsdb#608](https://github.com/prometheus-junkyard/tsdb/pull/608) -* [ENHANCEMENT] Reduce allocations in PromQL aggregations. [#5641](https://github.com/prometheus/prometheus/pull/5641) -* [ENHANCEMENT] Add storage warnings to LabelValues and LabelNames API results. [#5673](https://github.com/prometheus/prometheus/pull/5673) -* [ENHANCEMENT] Add `prometheus_http_requests_total` metric. [#5640](https://github.com/prometheus/prometheus/pull/5640) -* [ENHANCEMENT] Enable openbsd/arm build. [#5696](https://github.com/prometheus/prometheus/pull/5696) -* [ENHANCEMENT] Remote-write allocation improvements. [#5614](https://github.com/prometheus/prometheus/pull/5614) -* [ENHANCEMENT] Query performance improvement: Efficient iteration and search in HashForLabels and HashWithoutLabels. [#5707](https://github.com/prometheus/prometheus/pull/5707) -* [ENHANCEMENT] Allow injection of arbitrary headers in promtool. [#4389](https://github.com/prometheus/prometheus/pull/4389) -* [ENHANCEMENT] Allow passing `external_labels` in alert unit tests groups. 
[#5608](https://github.com/prometheus/prometheus/pull/5608) -* [ENHANCEMENT] Allows globs for rules when unit testing. [#5595](https://github.com/prometheus/prometheus/pull/5595) -* [ENHANCEMENT] Improved postings intersection matching. [tsdb#616](https://github.com/prometheus-junkyard/tsdb/pull/616) -* [ENHANCEMENT] Reduced disk usage for WAL for small setups. [tsdb#605](https://github.com/prometheus-junkyard/tsdb/pull/605) -* [ENHANCEMENT] Optimize queries using regexp for set lookups. [tsdb#602](https://github.com/prometheus-junkyard/tsdb/pull/602) -* [BUGFIX] resolve race condition in maxGauge. [#5647](https://github.com/prometheus/prometheus/pull/5647) -* [BUGFIX] Fix ZooKeeper connection leak. [#5675](https://github.com/prometheus/prometheus/pull/5675) -* [BUGFIX] Improved atomicity of .tmp block replacement during compaction for usual case. [tsdb#636](https://github.com/prometheus-junkyard/tsdb/pull/636) -* [BUGFIX] Fix "unknown series references" after clean shutdown. [tsdb#623](https://github.com/prometheus-junkyard/tsdb/pull/623) -* [BUGFIX] Re-calculate block size when calling `block.Delete`. [tsdb#637](https://github.com/prometheus-junkyard/tsdb/pull/637) -* [BUGFIX] Fix unsafe snapshots with head block. [tsdb#641](https://github.com/prometheus-junkyard/tsdb/pull/641) -* [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure. [tsdb#613](https://github.com/prometheus-junkyard/tsdb/pull/613) +* [CHANGE] Remove `max_retries` from queue_config (it has been unused since rewriting remote-write to utilize the write-ahead-log). #5649 +* [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before. tsdb#637 +* [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`. tsdb#622 +* [FEATURE] Add option to use Alertmanager API v2. #5482 +* [FEATURE] Added `humanizePercentage` function for templates. #5670 +* [FEATURE] Include InitContainers in Kubernetes Service Discovery. #5598 +* [FEATURE] Provide option to compress WAL records using Snappy. [#609](https://github.com/prometheus/tsdb/pull/609) +* [ENHANCEMENT] Create new clean segment when starting the WAL. tsdb#608 +* [ENHANCEMENT] Reduce allocations in PromQL aggregations. #5641 +* [ENHANCEMENT] Add storage warnings to LabelValues and LabelNames API results. #5673 +* [ENHANCEMENT] Add `prometheus_http_requests_total` metric. #5640 +* [ENHANCEMENT] Enable openbsd/arm build. #5696 +* [ENHANCEMENT] Remote-write allocation improvements. #5614 +* [ENHANCEMENT] Query performance improvement: Efficient iteration and search in HashForLabels and HashWithoutLabels. #5707 +* [ENHANCEMENT] Allow injection of arbitrary headers in promtool. #4389 +* [ENHANCEMENT] Allow passing `external_labels` in alert unit tests groups. #5608 +* [ENHANCEMENT] Allows globs for rules when unit testing. #5595 +* [ENHANCEMENT] Improved postings intersection matching. tsdb#616 +* [ENHANCEMENT] Reduced disk usage for WAL for small setups. tsdb#605 +* [ENHANCEMENT] Optimize queries using regexp for set lookups. tsdb#602 +* [BUGFIX] resolve race condition in maxGauge. #5647 +* [BUGFIX] Fix ZooKeeper connection leak. #5675 +* [BUGFIX] Improved atomicity of .tmp block replacement during compaction for usual case. tsdb#636 +* [BUGFIX] Fix "unknown series references" after clean shutdown. 
tsdb#623 +* [BUGFIX] Re-calculate block size when calling `block.Delete`. tsdb#637 +* [BUGFIX] Fix unsafe snapshots with head block. tsdb#641 +* [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure. tsdb#613 ## 2.10.0 / 2019-05-25 -* [CHANGE/BUGFIX] API: Encode alert values as string to correctly represent Inf/NaN. [#5582](https://github.com/prometheus/prometheus/pull/5582) -* [FEATURE] Template expansion: Make external labels available as `$externalLabels` in alert and console template expansion. [#5463](https://github.com/prometheus/prometheus/pull/5463) -* [FEATURE] TSDB: Add `prometheus_tsdb_wal_segment_current` metric for the WAL segment index that TSDB is currently writing to. [tsdb#601](https://github.com/prometheus-junkyard/tsdb/pull/601) -* [FEATURE] Scrape: Add `scrape_series_added` per-scrape metric. [#5546](https://github.com/prometheus/prometheus/pull/5546) -* [ENHANCEMENT] Discovery/kubernetes: Add labels `__meta_kubernetes_endpoint_node_name` and `__meta_kubernetes_endpoint_hostname`. [#5571](https://github.com/prometheus/prometheus/pull/5571) -* [ENHANCEMENT] Discovery/azure: Add label `__meta_azure_machine_public_ip`. [#5475](https://github.com/prometheus/prometheus/pull/5475) -* [ENHANCEMENT] TSDB: Simplify mergedPostings.Seek, resulting in better performance if there are many posting lists. [tsdb#595](https://github.com/prometheus-junkyard/tsdb/pull/595) -* [ENHANCEMENT] Log filesystem type on startup. [#5558](https://github.com/prometheus/prometheus/pull/5558) +* [CHANGE/BUGFIX] API: Encode alert values as string to correctly represent Inf/NaN. #5582 +* [FEATURE] Template expansion: Make external labels available as `$externalLabels` in alert and console template expansion. #5463 +* [FEATURE] TSDB: Add `prometheus_tsdb_wal_segment_current` metric for the WAL segment index that TSDB is currently writing to. tsdb#601 +* [FEATURE] Scrape: Add `scrape_series_added` per-scrape metric. #5546 +* [ENHANCEMENT] Discovery/kubernetes: Add labels `__meta_kubernetes_endpoint_node_name` and `__meta_kubernetes_endpoint_hostname`. #5571 +* [ENHANCEMENT] Discovery/azure: Add label `__meta_azure_machine_public_ip`. #5475 +* [ENHANCEMENT] TSDB: Simplify mergedPostings.Seek, resulting in better performance if there are many posting lists. tsdb#595 +* [ENHANCEMENT] Log filesystem type on startup. #5558 * [ENHANCEMENT] Cmd/promtool: Use POST requests for Query and QueryRange. client_golang#557 -* [ENHANCEMENT] Web: Sort alerts by group name. [#5448](https://github.com/prometheus/prometheus/pull/5448) -* [ENHANCEMENT] Console templates: Add convenience variables `$rawParams`, `$params`, `$path`. [#5463](https://github.com/prometheus/prometheus/pull/5463) -* [BUGFIX] TSDB: Don't panic when running out of disk space and recover nicely from the condition. [tsdb#582](https://github.com/prometheus-junkyard/tsdb/pull/582) -* [BUGFIX] TSDB: Correctly handle empty labels. [tsdb#594](https://github.com/prometheus-junkyard/tsdb/pull/594) -* [BUGFIX] TSDB: Don't crash on an unknown tombstone reference. [tsdb#604](https://github.com/prometheus-junkyard/tsdb/pull/604) -* [BUGFIX] Storage/remote: Remove queue-manager specific metrics if queue no longer exists. [#5445](https://github.com/prometheus/prometheus/pull/5445) [#5485](https://github.com/prometheus/prometheus/pull/5485) [#5555](https://github.com/prometheus/prometheus/pull/5555) -* [BUGFIX] PromQL: Correctly display `{__name__="a"}`. 
[#5552](https://github.com/prometheus/prometheus/pull/5552)
-* [BUGFIX] Discovery/kubernetes: Use `service` rather than `ingress` as the name for the service workqueue. [#5520](https://github.com/prometheus/prometheus/pull/5520)
-* [BUGFIX] Discovery/azure: Don't panic on a VM with a public IP. [#5587](https://github.com/prometheus/prometheus/pull/5587)
-* [BUGFIX] Discovery/triton: Always read HTTP body to completion. [#5596](https://github.com/prometheus/prometheus/pull/5596)
-* [BUGFIX] Web: Fixed Content-Type for js and css instead of using `/etc/mime.types`. [#5551](https://github.com/prometheus/prometheus/pull/5551)
+* [CHANGE/BUGFIX] API: Encode alert values as string to correctly represent Inf/NaN. #5582
+* [FEATURE] Template expansion: Make external labels available as `$externalLabels` in alert and console template expansion. #5463
+* [FEATURE] TSDB: Add `prometheus_tsdb_wal_segment_current` metric for the WAL segment index that TSDB is currently writing to. tsdb#601
+* [FEATURE] Scrape: Add `scrape_series_added` per-scrape metric. #5546
+* [ENHANCEMENT] Discovery/kubernetes: Add labels `__meta_kubernetes_endpoint_node_name` and `__meta_kubernetes_endpoint_hostname`. #5571
+* [ENHANCEMENT] Discovery/azure: Add label `__meta_azure_machine_public_ip`. #5475
+* [ENHANCEMENT] TSDB: Simplify mergedPostings.Seek, resulting in better performance if there are many posting lists. tsdb#595
+* [ENHANCEMENT] Log filesystem type on startup. #5558
* [ENHANCEMENT] Cmd/promtool: Use POST requests for Query and QueryRange. client_golang#557
+* [ENHANCEMENT] Web: Sort alerts by group name. #5448
+* [ENHANCEMENT] Console templates: Add convenience variables `$rawParams`, `$params`, `$path`. #5463
+* [BUGFIX] TSDB: Don't panic when running out of disk space and recover nicely from the condition. tsdb#582
+* [BUGFIX] TSDB: Correctly handle empty labels. tsdb#594
+* [BUGFIX] TSDB: Don't crash on an unknown tombstone reference. tsdb#604
+* [BUGFIX] Storage/remote: Remove queue-manager specific metrics if queue no longer exists. #5445 #5485 #5555
+* [BUGFIX] PromQL: Correctly display `{__name__="a"}`. #5552
+* [BUGFIX] Discovery/kubernetes: Use `service` rather than `ingress` as the name for the service workqueue. #5520
+* [BUGFIX] Discovery/azure: Don't panic on a VM with a public IP. #5587
+* [BUGFIX] Discovery/triton: Always read HTTP body to completion. #5596
+* [BUGFIX] Web: Fixed Content-Type for js and css instead of using `/etc/mime.types`. #5551

## 2.9.2 / 2019-04-24

-* [BUGFIX] Make sure subquery range is taken into account for selection [#5467](https://github.com/prometheus/prometheus/pull/5467)
-* [BUGFIX] Exhaust every request body before closing it [#5166](https://github.com/prometheus/prometheus/pull/5166)
-* [BUGFIX] Cmd/promtool: return errors from rule evaluations [#5483](https://github.com/prometheus/prometheus/pull/5483)
-* [BUGFIX] Remote Storage: string interner should not panic in release [#5487](https://github.com/prometheus/prometheus/pull/5487)
-* [BUGFIX] Fix memory allocation regression in mergedPostings.Seek [tsdb#586](https://github.com/prometheus-junkyard/tsdb/pull/586)
+* [BUGFIX] Make sure subquery range is taken into account for selection #5467
+* [BUGFIX] Exhaust every request body before closing it #5166
+* [BUGFIX] Cmd/promtool: return errors from rule evaluations #5483
+* [BUGFIX] Remote Storage: string interner should not panic in release #5487
+* [BUGFIX] Fix memory allocation regression in mergedPostings.Seek tsdb#586

## 2.9.1 / 2019-04-16

-* [BUGFIX] Discovery/kubernetes: fix missing label sanitization [#5462](https://github.com/prometheus/prometheus/pull/5462)
-* [BUGFIX] Remote_write: Prevent reshard concurrent with calling stop [#5460](https://github.com/prometheus/prometheus/pull/5460)
+* [BUGFIX] Discovery/kubernetes: fix missing label sanitization #5462
+* [BUGFIX] Remote_write: Prevent reshard concurrent with calling stop #5460

## 2.9.0 / 2019-04-15

@@ -510,249 +510,249 @@ This release uses Go 1.12, which includes a change in how memory is released to Linux. This will cause RSS to be reported as higher; however, this is harmless and the memory is available to the kernel when it needs it.

-* [CHANGE/ENHANCEMENT] Update Consul to support catalog.ServiceMultipleTags. [#5151](https://github.com/prometheus/prometheus/pull/5151)
-* [FEATURE] Add honor_timestamps scrape option.
[#5304](https://github.com/prometheus/prometheus/pull/5304) -* [ENHANCEMENT] Discovery/kubernetes: add present labels for labels/annotations. [#5443](https://github.com/prometheus/prometheus/pull/5443) -* [ENHANCEMENT] OpenStack SD: Add ProjectID and UserID meta labels. [#5431](https://github.com/prometheus/prometheus/pull/5431) -* [ENHANCEMENT] Add GODEBUG and retention to the runtime page. [#5324](https://github.com/prometheus/prometheus/pull/5324) [#5322](https://github.com/prometheus/prometheus/pull/5322) -* [ENHANCEMENT] Add support for POSTing to /series endpoint. [#5422](https://github.com/prometheus/prometheus/pull/5422) -* [ENHANCEMENT] Support PUT methods for Lifecycle and Admin APIs. [#5376](https://github.com/prometheus/prometheus/pull/5376) -* [ENHANCEMENT] Scrape: Add global jitter for HA server. [#5181](https://github.com/prometheus/prometheus/pull/5181) -* [ENHANCEMENT] Check for cancellation on every step of a range evaluation. [#5131](https://github.com/prometheus/prometheus/pull/5131) -* [ENHANCEMENT] String interning for labels & values in the remote_write path. [#5316](https://github.com/prometheus/prometheus/pull/5316) -* [ENHANCEMENT] Don't lose the scrape cache on a failed scrape. [#5414](https://github.com/prometheus/prometheus/pull/5414) -* [ENHANCEMENT] Reload cert files from disk automatically. [common#173](https://github.com/prometheus/common/pull/173) -* [ENHANCEMENT] Use fixed length millisecond timestamp format for logs. [common#172](https://github.com/prometheus/common/pull/172) -* [ENHANCEMENT] Performance improvements for postings. [tsdb#509](https://github.com/prometheus-junkyard/tsdb/pull/509) [tsdb#572](https://github.com/prometheus-junkyard/tsdb/pull/572) -* [BUGFIX] Remote Write: fix checkpoint reading. [#5429](https://github.com/prometheus/prometheus/pull/5429) -* [BUGFIX] Check if label value is valid when unmarshaling external labels from YAML. [#5316](https://github.com/prometheus/prometheus/pull/5316) -* [BUGFIX] Promparse: sort all labels when parsing. [#5372](https://github.com/prometheus/prometheus/pull/5372) -* [BUGFIX] Reload rules: copy state on both name and labels. [#5368](https://github.com/prometheus/prometheus/pull/5368) -* [BUGFIX] Exponentiation operator to drop metric name in result of operation. [#5329](https://github.com/prometheus/prometheus/pull/5329) -* [BUGFIX] Config: resolve more file paths. [#5284](https://github.com/prometheus/prometheus/pull/5284) -* [BUGFIX] Promtool: resolve relative paths in alert test files. [#5336](https://github.com/prometheus/prometheus/pull/5336) -* [BUGFIX] Set TLSHandshakeTimeout in HTTP transport. [common#179](https://github.com/prometheus/common/pull/179) -* [BUGFIX] Use fsync to be more resilient to machine crashes. [tsdb#573](https://github.com/prometheus-junkyard/tsdb/pull/573) [tsdb#578](https://github.com/prometheus-junkyard/tsdb/pull/578) -* [BUGFIX] Keep series that are still in WAL in checkpoints. [tsdb#577](https://github.com/prometheus-junkyard/tsdb/pull/577) -* [BUGFIX] Fix output sample values for scalar-to-vector comparison operations. [#5454](https://github.com/prometheus/prometheus/pull/5454) +* [CHANGE/ENHANCEMENT] Update Consul to support catalog.ServiceMultipleTags. #5151 +* [FEATURE] Add honor_timestamps scrape option. #5304 +* [ENHANCEMENT] Discovery/kubernetes: add present labels for labels/annotations. #5443 +* [ENHANCEMENT] OpenStack SD: Add ProjectID and UserID meta labels. #5431 +* [ENHANCEMENT] Add GODEBUG and retention to the runtime page. 
#5324 #5322 +* [ENHANCEMENT] Add support for POSTing to /series endpoint. #5422 +* [ENHANCEMENT] Support PUT methods for Lifecycle and Admin APIs. #5376 +* [ENHANCEMENT] Scrape: Add global jitter for HA server. #5181 +* [ENHANCEMENT] Check for cancellation on every step of a range evaluation. #5131 +* [ENHANCEMENT] String interning for labels & values in the remote_write path. #5316 +* [ENHANCEMENT] Don't lose the scrape cache on a failed scrape. #5414 +* [ENHANCEMENT] Reload cert files from disk automatically. common#173 +* [ENHANCEMENT] Use fixed length millisecond timestamp format for logs. common#172 +* [ENHANCEMENT] Performance improvements for postings. tsdb#509 tsdb#572 +* [BUGFIX] Remote Write: fix checkpoint reading. #5429 +* [BUGFIX] Check if label value is valid when unmarshaling external labels from YAML. #5316 +* [BUGFIX] Promparse: sort all labels when parsing. #5372 +* [BUGFIX] Reload rules: copy state on both name and labels. #5368 +* [BUGFIX] Exponentiation operator to drop metric name in result of operation. #5329 +* [BUGFIX] Config: resolve more file paths. #5284 +* [BUGFIX] Promtool: resolve relative paths in alert test files. #5336 +* [BUGFIX] Set TLSHandshakeTimeout in HTTP transport. common#179 +* [BUGFIX] Use fsync to be more resilient to machine crashes. tsdb#573 tsdb#578 +* [BUGFIX] Keep series that are still in WAL in checkpoints. tsdb#577 +* [BUGFIX] Fix output sample values for scalar-to-vector comparison operations. #5454 ## 2.8.1 / 2019-03-28 -* [BUGFIX] Display the job labels in `/targets` which was removed accidentally. [#5406](https://github.com/prometheus/prometheus/pull/5406) +* [BUGFIX] Display the job labels in `/targets` which was removed accidentally. #5406 ## 2.8.0 / 2019-03-12 This release uses Write-Ahead Logging (WAL) for the remote_write API. This currently causes a slight increase in memory usage, which will be addressed in future releases. -* [CHANGE] Default time retention is used only when no size based retention is specified. These are flags where time retention is specified by the flag `--storage.tsdb.retention` and size retention by `--storage.tsdb.retention.size`. [#5216](https://github.com/prometheus/prometheus/pull/5216) -* [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`. [tsdb#506](https://github.com/prometheus-junkyard/tsdb/pull/506) -* [FEATURE] [EXPERIMENTAL] Time overlapping blocks are now allowed; vertical compaction and vertical query merge. It is an optional feature which is controlled by the `--storage.tsdb.allow-overlapping-blocks` flag, disabled by default. [tsdb#370](https://github.com/prometheus-junkyard/tsdb/pull/370) -* [ENHANCEMENT] Use the WAL for remote_write API. [#4588](https://github.com/prometheus/prometheus/pull/4588) -* [ENHANCEMENT] Query performance improvements. [tsdb#531](https://github.com/prometheus-junkyard/tsdb/pull/531) -* [ENHANCEMENT] UI enhancements with upgrade to Bootstrap 4. [#5226](https://github.com/prometheus/prometheus/pull/5226) -* [ENHANCEMENT] Reduce time that Alertmanagers are in flux when reloaded. [#5126](https://github.com/prometheus/prometheus/pull/5126) -* [ENHANCEMENT] Limit number of metrics displayed on UI to 10000. [#5139](https://github.com/prometheus/prometheus/pull/5139) -* [ENHANCEMENT] (1) Remember All/Unhealthy choice on target-overview when reloading page. (2) Resize text-input area on Graph page on mouseclick. 
[#5201](https://github.com/prometheus/prometheus/pull/5201) -* [ENHANCEMENT] In `histogram_quantile` merge buckets with equivalent le values. [#5158](https://github.com/prometheus/prometheus/pull/5158). -* [ENHANCEMENT] Show list of offending labels in the error message in many-to-many scenarios. [#5189](https://github.com/prometheus/prometheus/pull/5189) -* [ENHANCEMENT] Show `Storage Retention` criteria in effect on `/status` page. [#5322](https://github.com/prometheus/prometheus/pull/5322) -* [BUGFIX] Fix sorting of rule groups. [#5260](https://github.com/prometheus/prometheus/pull/5260) -* [BUGFIX] Fix support for password_file and bearer_token_file in Kubernetes SD. [#5211](https://github.com/prometheus/prometheus/pull/5211) -* [BUGFIX] Scrape: catch errors when creating HTTP clients [#5182](https://github.com/prometheus/prometheus/pull/5182). Adds new metrics: +* [CHANGE] Default time retention is used only when no size based retention is specified. These are flags where time retention is specified by the flag `--storage.tsdb.retention` and size retention by `--storage.tsdb.retention.size`. #5216 +* [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`. prometheus/tsdb#506 +* [FEATURE] [EXPERIMENTAL] Time overlapping blocks are now allowed; vertical compaction and vertical query merge. It is an optional feature which is controlled by the `--storage.tsdb.allow-overlapping-blocks` flag, disabled by default. prometheus/tsdb#370 +* [ENHANCEMENT] Use the WAL for remote_write API. #4588 +* [ENHANCEMENT] Query performance improvements. prometheus/tsdb#531 +* [ENHANCEMENT] UI enhancements with upgrade to Bootstrap 4. #5226 +* [ENHANCEMENT] Reduce time that Alertmanagers are in flux when reloaded. #5126 +* [ENHANCEMENT] Limit number of metrics displayed on UI to 10000. #5139 +* [ENHANCEMENT] (1) Remember All/Unhealthy choice on target-overview when reloading page. (2) Resize text-input area on Graph page on mouseclick. #5201 +* [ENHANCEMENT] In `histogram_quantile` merge buckets with equivalent le values. #5158. +* [ENHANCEMENT] Show list of offending labels in the error message in many-to-many scenarios. #5189 +* [ENHANCEMENT] Show `Storage Retention` criteria in effect on `/status` page. #5322 +* [BUGFIX] Fix sorting of rule groups. #5260 +* [BUGFIX] Fix support for password_file and bearer_token_file in Kubernetes SD. #5211 +* [BUGFIX] Scrape: catch errors when creating HTTP clients #5182. Adds new metrics: * `prometheus_target_scrape_pools_total` * `prometheus_target_scrape_pools_failed_total` * `prometheus_target_scrape_pool_reloads_total` * `prometheus_target_scrape_pool_reloads_failed_total` -* [BUGFIX] Fix panic when aggregator param is not a literal. [#5290](https://github.com/prometheus/prometheus/pull/5290) +* [BUGFIX] Fix panic when aggregator param is not a literal. #5290 ## 2.7.2 / 2019-03-02 -* [BUGFIX] `prometheus_rule_group_last_evaluation_timestamp_seconds` is now a unix timestamp. [#5186](https://github.com/prometheus/prometheus/pull/5186) +* [BUGFIX] `prometheus_rule_group_last_evaluation_timestamp_seconds` is now a unix timestamp. #5186 ## 2.7.1 / 2019-01-31 This release has a fix for a Stored DOM XSS vulnerability that can be triggered when using the query history functionality. Thanks to Dor Tumarkin from Checkmarx for reporting it. -* [BUGFIX/SECURITY] Fix a Stored DOM XSS vulnerability with query history. 
[#5163](https://github.com/prometheus/prometheus/pull/5163)
-* [BUGFIX] `prometheus_rule_group_last_duration_seconds` now reports seconds instead of nanoseconds. [#5153](https://github.com/prometheus/prometheus/pull/5153)
-* [BUGFIX] Make sure the targets are consistently sorted in the targets page. [#5161](https://github.com/prometheus/prometheus/pull/5161)
+* [BUGFIX/SECURITY] Fix a Stored DOM XSS vulnerability with query history. #5163
+* [BUGFIX] `prometheus_rule_group_last_duration_seconds` now reports seconds instead of nanoseconds. #5153
+* [BUGFIX] Make sure the targets are consistently sorted in the targets page. #5161

## 2.7.0 / 2019-01-28

We're rolling back the Dockerfile changes introduced in 2.6.0. If you made changes to your docker deployment in 2.6.0, you will need to roll them back. This release also adds experimental support for disk size based retention. To accommodate that we are deprecating the flag `storage.tsdb.retention` in favour of `storage.tsdb.retention.time`. We print a warning if the flag is in use, but it will function without breaking until Prometheus 3.0.

-* [CHANGE] Rollback Dockerfile to version at 2.5.0. Rollback of the breaking change introduced in 2.6.0. [#5122](https://github.com/prometheus/prometheus/pull/5122)
-* [FEATURE] Add subqueries to PromQL. [#4831](https://github.com/prometheus/prometheus/pull/4831)
-* [FEATURE] [EXPERIMENTAL] Add support for disk size based retention. Note that we don't consider the WAL size which could be significant and the time based retention policy also applies. [#5109](https://github.com/prometheus/prometheus/pull/5109) [tsdb#343](https://github.com/prometheus-junkyard/tsdb/pull/343)
-* [FEATURE] Add CORS origin flag. [#5011](https://github.com/prometheus/prometheus/pull/5011)
-* [ENHANCEMENT] Consul SD: Add tagged address to the discovery metadata. [#5001](https://github.com/prometheus/prometheus/pull/5001)
-* [ENHANCEMENT] Kubernetes SD: Add service external IP and external name to the discovery metadata. [#4940](https://github.com/prometheus/prometheus/pull/4940)
-* [ENHANCEMENT] Azure SD: Add support for Managed Identity authentication. [#4590](https://github.com/prometheus/prometheus/pull/4590)
-* [ENHANCEMENT] Azure SD: Add tenant and subscription IDs to the discovery metadata. [#4969](https://github.com/prometheus/prometheus/pull/4969)
-* [ENHANCEMENT] OpenStack SD: Add support for application credentials based authentication. [#4968](https://github.com/prometheus/prometheus/pull/4968)
-* [ENHANCEMENT] Add metric for number of rule groups loaded. [#5090](https://github.com/prometheus/prometheus/pull/5090)
-* [BUGFIX] Avoid duplicate tests for alert unit tests. [#4964](https://github.com/prometheus/prometheus/pull/4964)
-* [BUGFIX] Don't depend on given order when comparing samples in alert unit testing. [#5049](https://github.com/prometheus/prometheus/pull/5049)
-* [BUGFIX] Make sure the retention period doesn't overflow. [#5112](https://github.com/prometheus/prometheus/pull/5112)
-* [BUGFIX] Make sure the blocks don't get very large. [#5112](https://github.com/prometheus/prometheus/pull/5112)
-* [BUGFIX] Don't generate blocks with no samples. [tsdb#374](https://github.com/prometheus-junkyard/tsdb/pull/374)
-* [BUGFIX] Reintroduce metric for WAL corruptions. [tsdb#473](https://github.com/prometheus-junkyard/tsdb/pull/473)
+* [CHANGE] Rollback Dockerfile to version at 2.5.0. Rollback of the breaking change introduced in 2.6.0. #5122
+* [FEATURE] Add subqueries to PromQL (see the example below). #4831
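Since subqueries are new syntax rather than a new function, a minimal sketch may help; the metric name and the durations are illustrative, not taken from this changelog:

```promql
# Subquery syntax: <instant expression>[<range>:<resolution>].
# This evaluates the 5m rate of http_requests_total at 1m steps over
# the last 30m, then takes the per-series maximum of those values.
max_over_time(rate(http_requests_total[5m])[30m:1m])
```

Leaving the resolution empty, as in `[30m:]`, defaults the subquery step to the global evaluation interval.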
+* [FEATURE] [EXPERIMENTAL] Add support for disk size based retention. Note that we don't consider the WAL size, which could be significant, and the time based retention policy also applies. #5109 prometheus/tsdb#343
+* [FEATURE] Add CORS origin flag. #5011
+* [ENHANCEMENT] Consul SD: Add tagged address to the discovery metadata. #5001
+* [ENHANCEMENT] Kubernetes SD: Add service external IP and external name to the discovery metadata. #4940
+* [ENHANCEMENT] Azure SD: Add support for Managed Identity authentication. #4590
+* [ENHANCEMENT] Azure SD: Add tenant and subscription IDs to the discovery metadata. #4969
+* [ENHANCEMENT] OpenStack SD: Add support for application credentials based authentication. #4968
+* [ENHANCEMENT] Add metric for number of rule groups loaded. #5090
+* [BUGFIX] Avoid duplicate tests for alert unit tests. #4964
+* [BUGFIX] Don't depend on given order when comparing samples in alert unit testing. #5049
+* [BUGFIX] Make sure the retention period doesn't overflow. #5112
+* [BUGFIX] Make sure the blocks don't get very large. #5112
+* [BUGFIX] Don't generate blocks with no samples. prometheus/tsdb#374
+* [BUGFIX] Reintroduce metric for WAL corruptions. prometheus/tsdb#473

## 2.6.1 / 2019-01-15

-* [BUGFIX] Azure SD: Fix discovery getting stuck sometimes. [#5088](https://github.com/prometheus/prometheus/pull/5088)
-* [BUGFIX] Marathon SD: Use `Tasks.Ports` when `RequirePorts` is `false`. [#5026](https://github.com/prometheus/prometheus/pull/5026)
-* [BUGFIX] Promtool: Fix "out-of-order sample" errors when testing rules. [#5069](https://github.com/prometheus/prometheus/pull/5069)
+* [BUGFIX] Azure SD: Fix discovery getting stuck sometimes. #5088
+* [BUGFIX] Marathon SD: Use `Tasks.Ports` when `RequirePorts` is `false`. #5026
+* [BUGFIX] Promtool: Fix "out-of-order sample" errors when testing rules. #5069

## 2.6.0 / 2018-12-17

-* [CHANGE] Remove default flags from the container's entrypoint, run Prometheus from `/etc/prometheus` and symlink the storage directory to `/etc/prometheus/data`. [#4976](https://github.com/prometheus/prometheus/pull/4976)
-* [CHANGE] Promtool: Remove the `update` command. [#3839](https://github.com/prometheus/prometheus/pull/3839)
-* [FEATURE] Add JSON log format via the `--log.format` flag. [#4876](https://github.com/prometheus/prometheus/pull/4876)
-* [FEATURE] API: Add /api/v1/labels endpoint to get all label names. [#4835](https://github.com/prometheus/prometheus/pull/4835)
-* [FEATURE] Web: Allow setting the page's title via the `--web.ui-title` flag. [#4841](https://github.com/prometheus/prometheus/pull/4841)
-* [ENHANCEMENT] Add `prometheus_tsdb_lowest_timestamp_seconds`, `prometheus_tsdb_head_min_time_seconds` and `prometheus_tsdb_head_max_time_seconds` metrics. [#4888](https://github.com/prometheus/prometheus/pull/4888)
-* [ENHANCEMENT] Add `rule_group_last_evaluation_timestamp_seconds` metric. [#4852](https://github.com/prometheus/prometheus/pull/4852)
-* [ENHANCEMENT] Add `prometheus_template_text_expansion_failures_total` and `prometheus_template_text_expansions_total` metrics. [#4747](https://github.com/prometheus/prometheus/pull/4747)
-* [ENHANCEMENT] Set consistent User-Agent header in outgoing requests. [#4891](https://github.com/prometheus/prometheus/pull/4891)
-* [ENHANCEMENT] Azure SD: Error out at load time when authentication parameters are missing. [#4907](https://github.com/prometheus/prometheus/pull/4907)
-* [ENHANCEMENT] EC2 SD: Add the machine's private DNS name to the discovery metadata.
[#4693](https://github.com/prometheus/prometheus/pull/4693) -* [ENHANCEMENT] EC2 SD: Add the operating system's platform to the discovery metadata. [#4663](https://github.com/prometheus/prometheus/pull/4663) -* [ENHANCEMENT] Kubernetes SD: Add the pod's phase to the discovery metadata. [#4824](https://github.com/prometheus/prometheus/pull/4824) -* [ENHANCEMENT] Kubernetes SD: Log Kubernetes messages. [#4931](https://github.com/prometheus/prometheus/pull/4931) -* [ENHANCEMENT] Promtool: Collect CPU and trace profiles. [#4897](https://github.com/prometheus/prometheus/pull/4897) -* [ENHANCEMENT] Promtool: Support writing output as JSON. [#4848](https://github.com/prometheus/prometheus/pull/4848) -* [ENHANCEMENT] Remote Read: Return available data if remote read fails partially. [#4832](https://github.com/prometheus/prometheus/pull/4832) -* [ENHANCEMENT] Remote Write: Improve queue performance. [#4772](https://github.com/prometheus/prometheus/pull/4772) -* [ENHANCEMENT] Remote Write: Add min_shards parameter to set the minimum number of shards. [#4924](https://github.com/prometheus/prometheus/pull/4924) -* [ENHANCEMENT] TSDB: Improve WAL reading. [#4953](https://github.com/prometheus/prometheus/pull/4953) -* [ENHANCEMENT] TSDB: Memory improvements. [#4953](https://github.com/prometheus/prometheus/pull/4953) -* [ENHANCEMENT] Web: Log stack traces on panic. [#4221](https://github.com/prometheus/prometheus/pull/4221) -* [ENHANCEMENT] Web UI: Add copy to clipboard button for configuration. [#4410](https://github.com/prometheus/prometheus/pull/4410) -* [ENHANCEMENT] Web UI: Support console queries at specific times. [#4764](https://github.com/prometheus/prometheus/pull/4764) -* [ENHANCEMENT] Web UI: group targets by job then instance. [#4898](https://github.com/prometheus/prometheus/pull/4898) [#4806](https://github.com/prometheus/prometheus/pull/4806) -* [BUGFIX] Deduplicate handler labels for HTTP metrics. [#4732](https://github.com/prometheus/prometheus/pull/4732) -* [BUGFIX] Fix leaked queriers causing shutdowns to hang. [#4922](https://github.com/prometheus/prometheus/pull/4922) -* [BUGFIX] Fix configuration loading panics on nil pointer slice elements. [#4942](https://github.com/prometheus/prometheus/pull/4942) -* [BUGFIX] API: Correctly skip mismatching targets on /api/v1/targets/metadata. [#4905](https://github.com/prometheus/prometheus/pull/4905) -* [BUGFIX] API: Better rounding for incoming query timestamps. [#4941](https://github.com/prometheus/prometheus/pull/4941) -* [BUGFIX] Azure SD: Fix panic. [#4867](https://github.com/prometheus/prometheus/pull/4867) -* [BUGFIX] Console templates: Fix hover when the metric has a null value. [#4906](https://github.com/prometheus/prometheus/pull/4906) -* [BUGFIX] Discovery: Remove all targets when the scrape configuration gets empty. [#4819](https://github.com/prometheus/prometheus/pull/4819) -* [BUGFIX] Marathon SD: Fix leaked connections. [#4915](https://github.com/prometheus/prometheus/pull/4915) -* [BUGFIX] Marathon SD: Use 'hostPort' member of portMapping to construct target endpoints. [#4887](https://github.com/prometheus/prometheus/pull/4887) -* [BUGFIX] PromQL: Fix a goroutine leak in the lexer/parser. [#4858](https://github.com/prometheus/prometheus/pull/4858) -* [BUGFIX] Scrape: Pass through content-type for non-compressed output. [#4912](https://github.com/prometheus/prometheus/pull/4912) -* [BUGFIX] Scrape: Fix deadlock in the scrape's manager. 
[#4894](https://github.com/prometheus/prometheus/pull/4894) -* [BUGFIX] Scrape: Scrape targets at fixed intervals even after Prometheus restarts. [#4926](https://github.com/prometheus/prometheus/pull/4926) -* [BUGFIX] TSDB: Support restored snapshots including the head properly. [#4953](https://github.com/prometheus/prometheus/pull/4953) -* [BUGFIX] TSDB: Repair WAL when the last record in a segment is torn. [#4953](https://github.com/prometheus/prometheus/pull/4953) -* [BUGFIX] TSDB: Fix unclosed file readers on Windows systems. [#4997](https://github.com/prometheus/prometheus/pull/4997) -* [BUGFIX] Web: Avoid proxy to connect to the local gRPC server. [#4572](https://github.com/prometheus/prometheus/pull/4572) +* [CHANGE] Remove default flags from the container's entrypoint, run Prometheus from `/etc/prometheus` and symlink the storage directory to `/etc/prometheus/data`. #4976 +* [CHANGE] Promtool: Remove the `update` command. #3839 +* [FEATURE] Add JSON log format via the `--log.format` flag. #4876 +* [FEATURE] API: Add /api/v1/labels endpoint to get all label names. #4835 +* [FEATURE] Web: Allow setting the page's title via the `--web.ui-title` flag. #4841 +* [ENHANCEMENT] Add `prometheus_tsdb_lowest_timestamp_seconds`, `prometheus_tsdb_head_min_time_seconds` and `prometheus_tsdb_head_max_time_seconds` metrics. #4888 +* [ENHANCEMENT] Add `rule_group_last_evaluation_timestamp_seconds` metric. #4852 +* [ENHANCEMENT] Add `prometheus_template_text_expansion_failures_total` and `prometheus_template_text_expansions_total` metrics. #4747 +* [ENHANCEMENT] Set consistent User-Agent header in outgoing requests. #4891 +* [ENHANCEMENT] Azure SD: Error out at load time when authentication parameters are missing. #4907 +* [ENHANCEMENT] EC2 SD: Add the machine's private DNS name to the discovery metadata. #4693 +* [ENHANCEMENT] EC2 SD: Add the operating system's platform to the discovery metadata. #4663 +* [ENHANCEMENT] Kubernetes SD: Add the pod's phase to the discovery metadata. #4824 +* [ENHANCEMENT] Kubernetes SD: Log Kubernetes messages. #4931 +* [ENHANCEMENT] Promtool: Collect CPU and trace profiles. #4897 +* [ENHANCEMENT] Promtool: Support writing output as JSON. #4848 +* [ENHANCEMENT] Remote Read: Return available data if remote read fails partially. #4832 +* [ENHANCEMENT] Remote Write: Improve queue performance. #4772 +* [ENHANCEMENT] Remote Write: Add min_shards parameter to set the minimum number of shards. #4924 +* [ENHANCEMENT] TSDB: Improve WAL reading. #4953 +* [ENHANCEMENT] TSDB: Memory improvements. #4953 +* [ENHANCEMENT] Web: Log stack traces on panic. #4221 +* [ENHANCEMENT] Web UI: Add copy to clipboard button for configuration. #4410 +* [ENHANCEMENT] Web UI: Support console queries at specific times. #4764 +* [ENHANCEMENT] Web UI: group targets by job then instance. #4898 #4806 +* [BUGFIX] Deduplicate handler labels for HTTP metrics. #4732 +* [BUGFIX] Fix leaked queriers causing shutdowns to hang. #4922 +* [BUGFIX] Fix configuration loading panics on nil pointer slice elements. #4942 +* [BUGFIX] API: Correctly skip mismatching targets on /api/v1/targets/metadata. #4905 +* [BUGFIX] API: Better rounding for incoming query timestamps. #4941 +* [BUGFIX] Azure SD: Fix panic. #4867 +* [BUGFIX] Console templates: Fix hover when the metric has a null value. #4906 +* [BUGFIX] Discovery: Remove all targets when the scrape configuration gets empty. #4819 +* [BUGFIX] Marathon SD: Fix leaked connections. 
#4915 +* [BUGFIX] Marathon SD: Use 'hostPort' member of portMapping to construct target endpoints. #4887 +* [BUGFIX] PromQL: Fix a goroutine leak in the lexer/parser. #4858 +* [BUGFIX] Scrape: Pass through content-type for non-compressed output. #4912 +* [BUGFIX] Scrape: Fix deadlock in the scrape's manager. #4894 +* [BUGFIX] Scrape: Scrape targets at fixed intervals even after Prometheus restarts. #4926 +* [BUGFIX] TSDB: Support restored snapshots including the head properly. #4953 +* [BUGFIX] TSDB: Repair WAL when the last record in a segment is torn. #4953 +* [BUGFIX] TSDB: Fix unclosed file readers on Windows systems. #4997 +* [BUGFIX] Web: Avoid proxy to connect to the local gRPC server. #4572 ## 2.5.0 / 2018-11-06 -* [CHANGE] Group targets by scrape config instead of job name. [#4806](https://github.com/prometheus/prometheus/pull/4806) [#4526](https://github.com/prometheus/prometheus/pull/4526) -* [CHANGE] Marathon SD: Various changes to adapt to Marathon 1.5+. [#4499](https://github.com/prometheus/prometheus/pull/4499) -* [CHANGE] Discovery: Split `prometheus_sd_discovered_targets` metric by scrape and notify (Alertmanager SD) as well as by section in the respective configuration. [#4753](https://github.com/prometheus/prometheus/pull/4753) -* [FEATURE] Add OpenMetrics support for scraping (EXPERIMENTAL). [#4700](https://github.com/prometheus/prometheus/pull/4700) -* [FEATURE] Add unit testing for rules. [#4350](https://github.com/prometheus/prometheus/pull/4350) -* [FEATURE] Make maximum number of samples per query configurable via `--query.max-samples` flag. [#4513](https://github.com/prometheus/prometheus/pull/4513) -* [FEATURE] Make maximum number of concurrent remote reads configurable via `--storage.remote.read-concurrent-limit` flag. [#4656](https://github.com/prometheus/prometheus/pull/4656) -* [ENHANCEMENT] Support s390x platform for Linux. [#4605](https://github.com/prometheus/prometheus/pull/4605) -* [ENHANCEMENT] API: Add `prometheus_api_remote_read_queries` metric tracking currently executed or waiting remote read API requests. [#4699](https://github.com/prometheus/prometheus/pull/4699) -* [ENHANCEMENT] Remote Read: Add `prometheus_remote_storage_remote_read_queries` metric tracking currently in-flight remote read queries. [#4677](https://github.com/prometheus/prometheus/pull/4677) -* [ENHANCEMENT] Remote Read: Reduced memory usage. [#4655](https://github.com/prometheus/prometheus/pull/4655) -* [ENHANCEMENT] Discovery: Add `prometheus_sd_discovered_targets`, `prometheus_sd_received_updates_total`, `prometheus_sd_updates_delayed_total`, and `prometheus_sd_updates_total` metrics for discovery subsystem. [#4667](https://github.com/prometheus/prometheus/pull/4667) -* [ENHANCEMENT] Discovery: Improve performance of previously slow updates of changes of targets. [#4526](https://github.com/prometheus/prometheus/pull/4526) -* [ENHANCEMENT] Kubernetes SD: Add extended metrics. [#4458](https://github.com/prometheus/prometheus/pull/4458) -* [ENHANCEMENT] OpenStack SD: Support discovering instances from all projects. [#4682](https://github.com/prometheus/prometheus/pull/4682) -* [ENHANCEMENT] OpenStack SD: Discover all interfaces. [#4649](https://github.com/prometheus/prometheus/pull/4649) -* [ENHANCEMENT] OpenStack SD: Support `tls_config` for the used HTTP client. [#4654](https://github.com/prometheus/prometheus/pull/4654) -* [ENHANCEMENT] Triton SD: Add ability to filter triton_sd targets by pre-defined groups. 
[#4701](https://github.com/prometheus/prometheus/pull/4701)
-* [ENHANCEMENT] Web UI: Avoid browser spell-checking in expression field. [#4728](https://github.com/prometheus/prometheus/pull/4728)
-* [ENHANCEMENT] Web UI: Add scrape duration and last evaluation time in targets and rules pages. [#4722](https://github.com/prometheus/prometheus/pull/4722)
-* [ENHANCEMENT] Web UI: Improve rule view by wrapping lines. [#4702](https://github.com/prometheus/prometheus/pull/4702)
-* [ENHANCEMENT] Rules: Error out at load time for invalid templates, rather than at evaluation time. [#4537](https://github.com/prometheus/prometheus/pull/4537)
-* [ENHANCEMENT] TSDB: Add metrics for WAL operations. [#4692](https://github.com/prometheus/prometheus/pull/4692)
-* [BUGFIX] Change max/min over_time to handle NaNs properly. [#4386](https://github.com/prometheus/prometheus/pull/4386)
-* [BUGFIX] Check label name for `count_values` PromQL function. [#4585](https://github.com/prometheus/prometheus/pull/4585)
-* [BUGFIX] Ensure that vectors and matrices do not contain identical label-sets. [#4589](https://github.com/prometheus/prometheus/pull/4589)
+* [CHANGE] Group targets by scrape config instead of job name. #4806 #4526
+* [CHANGE] Marathon SD: Various changes to adapt to Marathon 1.5+. #4499
+* [CHANGE] Discovery: Split `prometheus_sd_discovered_targets` metric by scrape and notify (Alertmanager SD) as well as by section in the respective configuration. #4753
+* [FEATURE] Add OpenMetrics support for scraping (EXPERIMENTAL). #4700
+* [FEATURE] Add unit testing for rules. #4350
+* [FEATURE] Make maximum number of samples per query configurable via `--query.max-samples` flag. #4513
+* [FEATURE] Make maximum number of concurrent remote reads configurable via `--storage.remote.read-concurrent-limit` flag. #4656
+* [ENHANCEMENT] Support s390x platform for Linux. #4605
+* [ENHANCEMENT] API: Add `prometheus_api_remote_read_queries` metric tracking currently executed or waiting remote read API requests. #4699
+* [ENHANCEMENT] Remote Read: Add `prometheus_remote_storage_remote_read_queries` metric tracking currently in-flight remote read queries. #4677
+* [ENHANCEMENT] Remote Read: Reduced memory usage. #4655
+* [ENHANCEMENT] Discovery: Add `prometheus_sd_discovered_targets`, `prometheus_sd_received_updates_total`, `prometheus_sd_updates_delayed_total`, and `prometheus_sd_updates_total` metrics for discovery subsystem. #4667
+* [ENHANCEMENT] Discovery: Improve performance of previously slow updates of changes of targets. #4526
+* [ENHANCEMENT] Kubernetes SD: Add extended metrics. #4458
+* [ENHANCEMENT] OpenStack SD: Support discovering instances from all projects. #4682
+* [ENHANCEMENT] OpenStack SD: Discover all interfaces. #4649
+* [ENHANCEMENT] OpenStack SD: Support `tls_config` for the used HTTP client. #4654
+* [ENHANCEMENT] Triton SD: Add ability to filter triton_sd targets by pre-defined groups. #4701
+* [ENHANCEMENT] Web UI: Avoid browser spell-checking in expression field. #4728
+* [ENHANCEMENT] Web UI: Add scrape duration and last evaluation time in targets and rules pages. #4722
+* [ENHANCEMENT] Web UI: Improve rule view by wrapping lines. #4702
+* [ENHANCEMENT] Rules: Error out at load time for invalid templates, rather than at evaluation time. #4537
+* [ENHANCEMENT] TSDB: Add metrics for WAL operations. #4692
+* [BUGFIX] Change max/min over_time to handle NaNs properly. #4386
+* [BUGFIX] Check label name for `count_values` PromQL function (usage example below). #4585
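Because the `count_values` fix above concerns validating the destination label name, a brief usage sketch may be useful; the choice of metric and label names here is illustrative:

```promql
# count_values("<label>", <expression>) counts, for each distinct sample
# value of the expression, how many series currently have that value,
# and records the value itself in the given label. Here: how many
# targets report each distinct value of up (0 or 1).
count_values("value", up)
```

The quoted destination label must be a valid label name; the fix above adds a check for this.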
#4585 +* [BUGFIX] Ensure that vectors and matrices do not contain identical label-sets. #4589 ## 2.4.3 / 2018-10-04 -* [BUGFIX] Fix panic when using custom EC2 API for SD [#4672](https://github.com/prometheus/prometheus/pull/4672) -* [BUGFIX] Fix panic when Zookeeper SD cannot connect to servers [#4669](https://github.com/prometheus/prometheus/pull/4669) -* [BUGFIX] Make the skip_head an optional parameter for snapshot API [#4674](https://github.com/prometheus/prometheus/pull/4674) +* [BUGFIX] Fix panic when using custom EC2 API for SD #4672 +* [BUGFIX] Fix panic when Zookeeper SD cannot connect to servers #4669 +* [BUGFIX] Make the skip_head an optional parameter for snapshot API #4674 ## 2.4.2 / 2018-09-21 The last release didn't have bugfix included due to a vendoring error. - * [BUGFIX] Handle WAL corruptions properly [tsdb#389](https://github.com/prometheus-junkyard/tsdb/pull/389) - * [BUGFIX] Handle WAL migrations correctly on Windows [tsdb#392](https://github.com/prometheus-junkyard/tsdb/pull/392) + * [BUGFIX] Handle WAL corruptions properly prometheus/tsdb#389 + * [BUGFIX] Handle WAL migrations correctly on Windows prometheus/tsdb#392 ## 2.4.1 / 2018-09-19 -* [ENHANCEMENT] New TSDB metrics [tsdb#375](https://github.com/prometheus-junkyard/tsdb/pull/375) [tsdb#363](https://github.com/prometheus-junkyard/tsdb/pull/363) -* [BUGFIX] Render UI correctly for Windows [#4616](https://github.com/prometheus/prometheus/pull/4616) +* [ENHANCEMENT] New TSDB metrics prometheus/tsdb#375 prometheus/tsdb#363 +* [BUGFIX] Render UI correctly for Windows #4616 ## 2.4.0 / 2018-09-11 This release includes multiple bugfixes and features. Further, the WAL implementation has been re-written so the storage is not forward compatible. Prometheus 2.3 storage will work on 2.4 but not vice-versa. -* [CHANGE] Reduce remote write default retries [#4279](https://github.com/prometheus/prometheus/pull/4279) -* [CHANGE] Remove /heap endpoint [#4460](https://github.com/prometheus/prometheus/pull/4460) -* [FEATURE] Persist alert 'for' state across restarts [#4061](https://github.com/prometheus/prometheus/pull/4061) -* [FEATURE] Add API providing per target metric metadata [#4183](https://github.com/prometheus/prometheus/pull/4183) -* [FEATURE] Add API providing recording and alerting rules [#4318](https://github.com/prometheus/prometheus/pull/4318) [#4501](https://github.com/prometheus/prometheus/pull/4501) +* [CHANGE] Reduce remote write default retries #4279 +* [CHANGE] Remove /heap endpoint #4460 +* [FEATURE] Persist alert 'for' state across restarts #4061 +* [FEATURE] Add API providing per target metric metadata #4183 +* [FEATURE] Add API providing recording and alerting rules #4318 #4501 * [ENHANCEMENT] Brand new WAL implementation for TSDB. Forwards incompatible with previous WAL. 
-* [ENHANCEMENT] Show rule evaluation errors in UI [#4457](https://github.com/prometheus/prometheus/pull/4457) -* [ENHANCEMENT] Throttle resends of alerts to Alertmanager [#4538](https://github.com/prometheus/prometheus/pull/4538) -* [ENHANCEMENT] Send EndsAt along with the alert to Alertmanager [#4550](https://github.com/prometheus/prometheus/pull/4550) -* [ENHANCEMENT] Limit the samples returned by remote read endpoint [#4532](https://github.com/prometheus/prometheus/pull/4532) -* [ENHANCEMENT] Limit the data read in through remote read [#4239](https://github.com/prometheus/prometheus/pull/4239) -* [ENHANCEMENT] Coalesce identical SD configurations [#3912](https://github.com/prometheus/prometheus/pull/3912) -* [ENHANCEMENT] `promtool`: Add new commands for debugging and querying [#4247](https://github.com/prometheus/prometheus/pull/4247) [#4308](https://github.com/prometheus/prometheus/pull/4308) [#4346](https://github.com/prometheus/prometheus/pull/4346) #4454 -* [ENHANCEMENT] Update console examples for node_exporter v0.16.0 [#4208](https://github.com/prometheus/prometheus/pull/4208) -* [ENHANCEMENT] Optimize PromQL aggregations [#4248](https://github.com/prometheus/prometheus/pull/4248) -* [ENHANCEMENT] Remote read: Add Offset to hints [#4226](https://github.com/prometheus/prometheus/pull/4226) -* [ENHANCEMENT] `consul_sd`: Add support for ServiceMeta field [#4280](https://github.com/prometheus/prometheus/pull/4280) -* [ENHANCEMENT] `ec2_sd`: Maintain order of subnet_id label [#4405](https://github.com/prometheus/prometheus/pull/4405) -* [ENHANCEMENT] `ec2_sd`: Add support for custom endpoint to support EC2 compliant APIs [#4333](https://github.com/prometheus/prometheus/pull/4333) -* [ENHANCEMENT] `ec2_sd`: Add instance_owner label [#4514](https://github.com/prometheus/prometheus/pull/4514) -* [ENHANCEMENT] `azure_sd`: Add support for VMSS discovery and multiple environments [#4202](https://github.com/prometheus/prometheus/pull/4202) [#4569](https://github.com/prometheus/prometheus/pull/4569) -* [ENHANCEMENT] `gce_sd`: Add instance_id label [#4488](https://github.com/prometheus/prometheus/pull/4488) -* [ENHANCEMENT] Forbid rule-abiding robots from indexing [#4266](https://github.com/prometheus/prometheus/pull/4266) -* [ENHANCEMENT] Log virtual memory limits on startup [#4418](https://github.com/prometheus/prometheus/pull/4418) -* [BUGFIX] Wait for service discovery to stop before exiting [#4508](https://github.com/prometheus/prometheus/pull/4508) -* [BUGFIX] Render SD configs properly [#4338](https://github.com/prometheus/prometheus/pull/4338) -* [BUGFIX] Only add LookbackDelta to vector selectors [#4399](https://github.com/prometheus/prometheus/pull/4399) -* [BUGFIX] `ec2_sd`: Handle panic-ing nil pointer [#4469](https://github.com/prometheus/prometheus/pull/4469) -* [BUGFIX] `consul_sd`: Stop leaking connections [#4443](https://github.com/prometheus/prometheus/pull/4443) -* [BUGFIX] Use templated labels also to identify alerts [#4500](https://github.com/prometheus/prometheus/pull/4500) -* [BUGFIX] Reduce floating point errors in stddev and related functions [#4533](https://github.com/prometheus/prometheus/pull/4533) -* [BUGFIX] Log errors while encoding responses [#4359](https://github.com/prometheus/prometheus/pull/4359) +* [ENHANCEMENT] Show rule evaluation errors in UI #4457 +* [ENHANCEMENT] Throttle resends of alerts to Alertmanager #4538 +* [ENHANCEMENT] Send EndsAt along with the alert to Alertmanager #4550 +* [ENHANCEMENT] Limit the samples returned by remote read endpoint 
#4532 +* [ENHANCEMENT] Limit the data read in through remote read #4239 +* [ENHANCEMENT] Coalesce identical SD configurations #3912 +* [ENHANCEMENT] `promtool`: Add new commands for debugging and querying #4247 #4308 #4346 #4454 +* [ENHANCEMENT] Update console examples for node_exporter v0.16.0 #4208 +* [ENHANCEMENT] Optimize PromQL aggregations #4248 +* [ENHANCEMENT] Remote read: Add Offset to hints #4226 +* [ENHANCEMENT] `consul_sd`: Add support for ServiceMeta field #4280 +* [ENHANCEMENT] `ec2_sd`: Maintain order of subnet_id label #4405 +* [ENHANCEMENT] `ec2_sd`: Add support for custom endpoint to support EC2 compliant APIs #4333 +* [ENHANCEMENT] `ec2_sd`: Add instance_owner label #4514 +* [ENHANCEMENT] `azure_sd`: Add support for VMSS discovery and multiple environments #4202 #4569 +* [ENHANCEMENT] `gce_sd`: Add instance_id label #4488 +* [ENHANCEMENT] Forbid rule-abiding robots from indexing #4266 +* [ENHANCEMENT] Log virtual memory limits on startup #4418 +* [BUGFIX] Wait for service discovery to stop before exiting #4508 +* [BUGFIX] Render SD configs properly #4338 +* [BUGFIX] Only add LookbackDelta to vector selectors #4399 +* [BUGFIX] `ec2_sd`: Handle panic-ing nil pointer #4469 +* [BUGFIX] `consul_sd`: Stop leaking connections #4443 +* [BUGFIX] Use templated labels also to identify alerts #4500 +* [BUGFIX] Reduce floating point errors in stddev and related functions #4533 +* [BUGFIX] Log errors while encoding responses #4359 ## 2.3.2 / 2018-07-12 -* [BUGFIX] Fix various tsdb bugs [#4369](https://github.com/prometheus/prometheus/pull/4369) -* [BUGFIX] Reorder startup and shutdown to prevent panics. [#4321](https://github.com/prometheus/prometheus/pull/4321) -* [BUGFIX] Exit with non-zero code on error [#4296](https://github.com/prometheus/prometheus/pull/4296) -* [BUGFIX] discovery/kubernetes/ingress: fix scheme discovery [#4329](https://github.com/prometheus/prometheus/pull/4329) -* [BUGFIX] Fix race in zookeeper sd [#4355](https://github.com/prometheus/prometheus/pull/4355) -* [BUGFIX] Better timeout handling in promql [#4291](https://github.com/prometheus/prometheus/pull/4291) [#4300](https://github.com/prometheus/prometheus/pull/4300) -* [BUGFIX] Propagate errors when selecting series from the tsdb [#4136](https://github.com/prometheus/prometheus/pull/4136) +* [BUGFIX] Fix various tsdb bugs #4369 +* [BUGFIX] Reorder startup and shutdown to prevent panics. #4321 +* [BUGFIX] Exit with non-zero code on error #4296 +* [BUGFIX] discovery/kubernetes/ingress: fix scheme discovery #4329 +* [BUGFIX] Fix race in zookeeper sd #4355 +* [BUGFIX] Better timeout handling in promql #4291 #4300 +* [BUGFIX] Propagate errors when selecting series from the tsdb #4136 ## 2.3.1 / 2018-06-19 -* [BUGFIX] Avoid infinite loop on duplicate NaN values. 
[#4275](https://github.com/prometheus/prometheus/pull/4275) -* [BUGFIX] Fix nil pointer deference when using various API endpoints [#4282](https://github.com/prometheus/prometheus/pull/4282) -* [BUGFIX] config: set target group source index during unmarshaling [#4245](https://github.com/prometheus/prometheus/pull/4245) -* [BUGFIX] discovery/file: fix logging [#4178](https://github.com/prometheus/prometheus/pull/4178) -* [BUGFIX] kubernetes_sd: fix namespace filtering [#4285](https://github.com/prometheus/prometheus/pull/4285) -* [BUGFIX] web: restore old path prefix behavior [#4273](https://github.com/prometheus/prometheus/pull/4273) -* [BUGFIX] web: remove security headers added in 2.3.0 [#4259](https://github.com/prometheus/prometheus/pull/4259) +* [BUGFIX] Avoid infinite loop on duplicate NaN values. #4275 +* [BUGFIX] Fix nil pointer deference when using various API endpoints #4282 +* [BUGFIX] config: set target group source index during unmarshaling #4245 +* [BUGFIX] discovery/file: fix logging #4178 +* [BUGFIX] kubernetes_sd: fix namespace filtering #4285 +* [BUGFIX] web: restore old path prefix behavior #4273 +* [BUGFIX] web: remove security headers added in 2.3.0 #4259 ## 2.3.0 / 2018-06-05 From 7c028d59c2f8919c60a966a3588fccc9c5710c74 Mon Sep 17 00:00:00 2001 From: n888 Date: Wed, 28 Apr 2021 02:29:12 -0700 Subject: [PATCH 02/34] Add lightsail service discovery (#8693) Signed-off-by: N888 --- config/config_test.go | 30 +++- config/testdata/conf.good.yml | 7 + discovery/{ec2 => aws}/ec2.go | 58 +++---- discovery/aws/lightsail.go | 234 ++++++++++++++++++++++++++++ discovery/install/install.go | 2 +- docs/configuration/configuration.md | 56 +++++++ 6 files changed, 354 insertions(+), 33 deletions(-) rename discovery/{ec2 => aws}/ec2.go (85%) create mode 100644 discovery/aws/lightsail.go diff --git a/config/config_test.go b/config/config_test.go index 330f6f5a4..de6ad8526 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -30,11 +30,11 @@ import ( "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/aws" "github.com/prometheus/prometheus/discovery/azure" "github.com/prometheus/prometheus/discovery/consul" "github.com/prometheus/prometheus/discovery/digitalocean" "github.com/prometheus/prometheus/discovery/dns" - "github.com/prometheus/prometheus/discovery/ec2" "github.com/prometheus/prometheus/discovery/eureka" "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/hetzner" @@ -465,14 +465,14 @@ var expectedConf = &Config{ HTTPClientConfig: config.DefaultHTTPClientConfig, ServiceDiscoveryConfigs: discovery.Configs{ - &ec2.SDConfig{ + &aws.EC2SDConfig{ Region: "us-east-1", AccessKey: "access", SecretKey: "mysecret", Profile: "profile", RefreshInterval: model.Duration(60 * time.Second), Port: 80, - Filters: []*ec2.Filter{ + Filters: []*aws.EC2Filter{ { Name: "tag:environment", Values: []string{"prod"}, @@ -485,6 +485,28 @@ var expectedConf = &Config{ }, }, }, + { + JobName: "service-lightsail", + + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: DefaultScrapeConfig.MetricsPath, + Scheme: DefaultScrapeConfig.Scheme, + HTTPClientConfig: config.DefaultHTTPClientConfig, + + ServiceDiscoveryConfigs: discovery.Configs{ + &aws.LightsailSDConfig{ + Region: "us-east-1", + AccessKey: "access", + SecretKey: "mysecret", + Profile: "profile", + RefreshInterval: model.Duration(60 * time.Second), + 
Port: 80, + }, + }, + }, { JobName: "service-azure", @@ -886,7 +908,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Equal(t, 12, len(matches), "wrong number of secret matches found") + require.Equal(t, 13, len(matches), "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 515f91ed9..7163abbfe 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -215,6 +215,13 @@ scrape_configs: - web - db +- job_name: service-lightsail + lightsail_sd_configs: + - region: us-east-1 + access_key: access + secret_key: mysecret + profile: profile + - job_name: service-azure azure_sd_configs: - environment: AzurePublicCloud diff --git a/discovery/ec2/ec2.go b/discovery/aws/ec2.go similarity index 85% rename from discovery/ec2/ec2.go rename to discovery/aws/ec2.go index 8fa694c86..e2464277a 100644 --- a/discovery/ec2/ec2.go +++ b/discovery/aws/ec2.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Prometheus Authors +// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package ec2 +package aws import ( "context" @@ -61,24 +61,26 @@ const ( ec2LabelSeparator = "," ) -// DefaultSDConfig is the default EC2 SD configuration. -var DefaultSDConfig = SDConfig{ - Port: 80, - RefreshInterval: model.Duration(60 * time.Second), -} +var ( + // DefaultEC2SDConfig is the default EC2 SD configuration. + DefaultEC2SDConfig = EC2SDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + } +) func init() { - discovery.RegisterConfig(&SDConfig{}) + discovery.RegisterConfig(&EC2SDConfig{}) } // Filter is the configuration for filtering EC2 instances. -type Filter struct { +type EC2Filter struct { Name string `yaml:"name"` Values []string `yaml:"values"` } -// SDConfig is the configuration for EC2 based service discovery. -type SDConfig struct { +// EC2SDConfig is the configuration for EC2 based service discovery. +type EC2SDConfig struct { Endpoint string `yaml:"endpoint"` Region string `yaml:"region"` AccessKey string `yaml:"access_key,omitempty"` @@ -87,21 +89,21 @@ type SDConfig struct { RoleARN string `yaml:"role_arn,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` Port int `yaml:"port"` - Filters []*Filter `yaml:"filters"` + Filters []*EC2Filter `yaml:"filters"` } -// Name returns the name of the Config. -func (*SDConfig) Name() string { return "ec2" } +// Name returns the name of the EC2 Config. +func (*EC2SDConfig) Name() string { return "ec2" } -// NewDiscoverer returns a Discoverer for the Config. -func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { - return NewDiscovery(c, opts.Logger), nil +// NewDiscoverer returns a Discoverer for the EC2 Config. +func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { + return NewEC2Discovery(c, opts.Logger), nil } -// UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultSDConfig - type plain SDConfig +// UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config. +func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultEC2SDConfig + type plain EC2SDConfig err := unmarshal((*plain)(c)) if err != nil { return err @@ -128,18 +130,18 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // Discovery periodically performs EC2-SD requests. It implements // the Discoverer interface. -type Discovery struct { +type EC2Discovery struct { *refresh.Discovery - cfg *SDConfig + cfg *EC2SDConfig ec2 *ec2.EC2 } -// NewDiscovery returns a new EC2Discovery which periodically refreshes its targets. -func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { +// NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. +func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery { if logger == nil { logger = log.NewNopLogger() } - d := &Discovery{ + d := &EC2Discovery{ cfg: conf, } d.Discovery = refresh.NewDiscovery( @@ -151,7 +153,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { return d } -func (d *Discovery) ec2Client() (*ec2.EC2, error) { +func (d *EC2Discovery) ec2Client() (*ec2.EC2, error) { if d.ec2 != nil { return d.ec2, nil } @@ -183,7 +185,7 @@ func (d *Discovery) ec2Client() (*ec2.EC2, error) { return d.ec2, nil } -func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { +func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { ec2Client, err := d.ec2Client() if err != nil { return nil, err diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go new file mode 100644 index 000000000..8e11a1a4d --- /dev/null +++ b/discovery/aws/lightsail.go @@ -0,0 +1,234 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
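+
+// A minimal scrape configuration using the Lightsail discovery added in this
+// file might look as follows (the job name and port are hypothetical; the
+// private IP is used for __address__ by default and can be swapped for the
+// public IP via relabeling, matching only when a public IP exists):
+//
+//	scrape_configs:
+//	  - job_name: lightsail
+//	    lightsail_sd_configs:
+//	      - region: us-east-1
+//	        refresh_interval: 60s
+//	        port: 9100
+//	    relabel_configs:
+//	      - source_labels: [__meta_lightsail_public_ip]
+//	        regex: (.+)
+//	        target_label: __address__
+//	        replacement: "$1:9100"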
+ +package aws + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/lightsail" + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/refresh" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + lightsailLabel = model.MetaLabelPrefix + "lightsail_" + lightsailLabelAZ = lightsailLabel + "availability_zone" + lightsailLabelBlueprintID = lightsailLabel + "blueprint_id" + lightsailLabelBundleID = lightsailLabel + "bundle_id" + lightsailLabelInstanceName = lightsailLabel + "instance_name" + lightsailLabelInstanceState = lightsailLabel + "instance_state" + lightsailLabelInstanceSupportCode = lightsailLabel + "instance_support_code" + lightsailLabelIPv6Addresses = lightsailLabel + "ipv6_addresses" + lightsailLabelPrivateIP = lightsailLabel + "private_ip" + lightsailLabelPublicIP = lightsailLabel + "public_ip" + lightsailLabelTag = lightsailLabel + "tag_" + lightsailLabelSeparator = "," +) + +var ( + // DefaultLightsailSDConfig is the default Lightsail SD configuration. + DefaultLightsailSDConfig = LightsailSDConfig{ + Port: 80, + RefreshInterval: model.Duration(60 * time.Second), + } +) + +func init() { + discovery.RegisterConfig(&LightsailSDConfig{}) +} + +// LightsailSDConfig is the configuration for Lightsail based service discovery. +type LightsailSDConfig struct { + Endpoint string `yaml:"endpoint"` + Region string `yaml:"region"` + AccessKey string `yaml:"access_key,omitempty"` + SecretKey config.Secret `yaml:"secret_key,omitempty"` + Profile string `yaml:"profile,omitempty"` + RoleARN string `yaml:"role_arn,omitempty"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Port int `yaml:"port"` +} + +// Name returns the name of the Lightsail Config. +func (*LightsailSDConfig) Name() string { return "lightsail" } + +// NewDiscoverer returns a Discoverer for the Lightsail Config. +func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { + return NewLightsailDiscovery(c, opts.Logger), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config. +func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultLightsailSDConfig + type plain LightsailSDConfig + err := unmarshal((*plain)(c)) + if err != nil { + return err + } + if c.Region == "" { + sess, err := session.NewSession() + if err != nil { + return err + } + + metadata := ec2metadata.New(sess) + + region, err := metadata.Region() + if err != nil { + return errors.New("Lightsail SD configuration requires a region") + } + c.Region = region + } + return nil +} + +// Discovery periodically performs Lightsail-SD requests. It implements +// the Discoverer interface. +type LightsailDiscovery struct { + *refresh.Discovery + cfg *LightsailSDConfig + lightsail *lightsail.Lightsail +} + +// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. 
+func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *LightsailDiscovery { + if logger == nil { + logger = log.NewNopLogger() + } + d := &LightsailDiscovery{ + cfg: conf, + } + d.Discovery = refresh.NewDiscovery( + logger, + "lightsail", + time.Duration(d.cfg.RefreshInterval), + d.refresh, + ) + return d +} + +func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) { + if d.lightsail != nil { + return d.lightsail, nil + } + + creds := credentials.NewStaticCredentials(d.cfg.AccessKey, string(d.cfg.SecretKey), "") + if d.cfg.AccessKey == "" && d.cfg.SecretKey == "" { + creds = nil + } + + sess, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{ + Endpoint: &d.cfg.Endpoint, + Region: &d.cfg.Region, + Credentials: creds, + }, + Profile: d.cfg.Profile, + }) + if err != nil { + return nil, errors.Wrap(err, "could not create aws session") + } + + if d.cfg.RoleARN != "" { + creds := stscreds.NewCredentials(sess, d.cfg.RoleARN) + d.lightsail = lightsail.New(sess, &aws.Config{Credentials: creds}) + } else { + d.lightsail = lightsail.New(sess) + } + + return d.lightsail, nil +} + +func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { + lightsailClient, err := d.lightsailClient() + if err != nil { + return nil, err + } + + tg := &targetgroup.Group{ + Source: d.cfg.Region, + } + + input := &lightsail.GetInstancesInput{} + + output, err := lightsailClient.GetInstancesWithContext(ctx, input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "AuthFailure" || awsErr.Code() == "UnauthorizedOperation") { + d.lightsail = nil + } + return nil, errors.Wrap(err, "could not get instances") + } + + for _, inst := range output.Instances { + if inst.PrivateIpAddress == nil { + continue + } + + labels := model.LabelSet{ + lightsailLabelAZ: model.LabelValue(*inst.Location.AvailabilityZone), + lightsailLabelBlueprintID: model.LabelValue(*inst.BlueprintId), + lightsailLabelBundleID: model.LabelValue(*inst.BundleId), + lightsailLabelInstanceName: model.LabelValue(*inst.Name), + lightsailLabelInstanceState: model.LabelValue(*inst.State.Name), + lightsailLabelInstanceSupportCode: model.LabelValue(*inst.SupportCode), + lightsailLabelPrivateIP: model.LabelValue(*inst.PrivateIpAddress), + } + + addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port)) + labels[model.AddressLabel] = model.LabelValue(addr) + + if inst.PublicIpAddress != nil { + labels[lightsailLabelPublicIP] = model.LabelValue(*inst.PublicIpAddress) + } + + if len(inst.Ipv6Addresses) > 0 { + var ipv6addrs []string + for _, ipv6addr := range inst.Ipv6Addresses { + ipv6addrs = append(ipv6addrs, *ipv6addr) + } + labels[lightsailLabelIPv6Addresses] = model.LabelValue( + lightsailLabelSeparator + + strings.Join(ipv6addrs, lightsailLabelSeparator) + + lightsailLabelSeparator) + } + + for _, t := range inst.Tags { + if t == nil || t.Key == nil || t.Value == nil { + continue + } + name := strutil.SanitizeLabelName(*t.Key) + labels[lightsailLabelTag+model.LabelName(name)] = model.LabelValue(*t.Value) + } + + tg.Targets = append(tg.Targets, labels) + } + return []*targetgroup.Group{tg}, nil +} diff --git a/discovery/install/install.go b/discovery/install/install.go index d6f69d0d6..3e6f0f388 100644 --- a/discovery/install/install.go +++ b/discovery/install/install.go @@ -16,11 +16,11 @@ package install import ( + _ "github.com/prometheus/prometheus/discovery/aws" // register aws _ 
"github.com/prometheus/prometheus/discovery/azure" // register azure _ "github.com/prometheus/prometheus/discovery/consul" // register consul _ "github.com/prometheus/prometheus/discovery/digitalocean" // register digitalocean _ "github.com/prometheus/prometheus/discovery/dns" // register dns - _ "github.com/prometheus/prometheus/discovery/ec2" // register ec2 _ "github.com/prometheus/prometheus/discovery/eureka" // register eureka _ "github.com/prometheus/prometheus/discovery/file" // register file _ "github.com/prometheus/prometheus/discovery/gce" // register gce diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 7d9a0e9b2..d2bc058cd 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -238,6 +238,10 @@ hetzner_sd_configs: kubernetes_sd_configs: [ - ... ] +# List of Lightsail service discovery configurations. +lightsail_sd_configs: + [ - ... ] + # List of Marathon service discovery configurations. marathon_sd_configs: [ - ... ] @@ -1341,6 +1345,54 @@ for a detailed example of configuring Prometheus for Kubernetes. You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator), which automates the Prometheus setup on top of Kubernetes. +### `` + +Lightsail SD configurations allow retrieving scrape targets from [AWS Lightsail](https://aws.amazon.com/lightsail/) +instances. The private IP address is used by default, but may be changed to +the public IP address with relabeling. + +The following meta labels are available on targets during [relabeling](#relabel_config): + +* `__meta_lightsail_availability_zone`: the availability zone in which the instance is running +* `__meta_lightsail_blueprint_id`: the Lightsail blueprint ID +* `__meta_lightsail_bundle_id`: the Lightsail bundle ID +* `__meta_lightsail_instance_name`: the name of the Lightsail instance +* `__meta_lightsail_instance_state`: the state of the Lightsail instance +* `__meta_lightsail_instance_support_code`: the support code of the Lightsail instance +* `__meta_lightsail_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present +* `__meta_lightsail_private_ip`: the private IP address of the instance +* `__meta_lightsail_public_ip`: the public IP address of the instance, if available +* `__meta_lightsail_tag_`: each tag value of the instance + +See below for the configuration options for Lightsail discovery: + +```yaml +# The information to access the Lightsail API. + +# The AWS region. If blank, the region from the instance metadata is used. +[ region: ] + +# Custom endpoint to be used. +[ endpoint: ] + +# The AWS API keys. If blank, the environment variables `AWS_ACCESS_KEY_ID` +# and `AWS_SECRET_ACCESS_KEY` are used. +[ access_key: ] +[ secret_key: ] +# Named AWS profile used to connect to the API. +[ profile: ] + +# AWS Role ARN, an alternative to using AWS API keys. +[ role_arn: ] + +# Refresh interval to re-read the instance list. +[ refresh_interval: | default = 60s ] + +# The port to scrape metrics from. If using the public IP address, this must +# instead be specified in the relabeling rule. +[ port: | default = 80 ] +``` + ### `` Marathon SD configurations allow retrieving scrape targets using the @@ -1936,6 +1988,10 @@ hetzner_sd_configs: kubernetes_sd_configs: [ - ... ] +# List of Lightsail service discovery configurations. +lightsail_sd_configs: + [ - ... ] + # List of Marathon service discovery configurations. 
marathon_sd_configs: [ - ... ] From fa184a5fc3bd83abe37854983e0f548ceaabb4e0 Mon Sep 17 00:00:00 2001 From: Levi Harrison Date: Wed, 28 Apr 2021 08:47:52 -0400 Subject: [PATCH 03/34] Add OAuth 2.0 Config (#8761) * Introduced oauth2 config into the codebase Signed-off-by: Levi Harrison --- config/config.go | 6 +-- config/config_test.go | 21 +++++--- config/testdata/conf.good.yml | 5 ++ docs/configuration/configuration.md | 83 ++++++++++++++++++++++++++++- go.mod | 2 +- go.sum | 4 +- 6 files changed, 107 insertions(+), 14 deletions(-) diff --git a/config/config.go b/config/config.go index 02d9f763d..56350e1be 100644 --- a/config/config.go +++ b/config/config.go @@ -663,10 +663,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err } httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || - c.HTTPClientConfig.Authorization != nil + c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, & sigv4 must be configured") + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") } return nil @@ -675,7 +675,7 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth or authorization parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return errors.Errorf("%s is a reserved header. 
It must not be changed", header) diff --git a/config/config_test.go b/config/config_test.go index de6ad8526..85e0a78fb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -90,9 +90,16 @@ var expectedConf = &Config{ Action: relabel.Drop, }, }, - QueueConfig: DefaultQueueConfig, - MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + QueueConfig: DefaultQueueConfig, + MetadataConfig: DefaultMetadataConfig, + HTTPClientConfig: config.HTTPClientConfig{ + OAuth2: &config.OAuth2{ + ClientID: "123", + ClientSecret: "456", + TokenURL: "http://remote1/auth", + }, + FollowRedirects: true, + }, }, { URL: mustParseURL("http://remote2/push"), @@ -1010,7 +1017,7 @@ var expectedErrors = []struct { errMsg: "at most one of bearer_token & bearer_token_file must be configured", }, { filename: "bearertoken_basicauth.bad.yml", - errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured", + errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured", }, { filename: "kubernetes_http_config_without_api_server.bad.yml", errMsg: "to use custom HTTP client configuration please provide the 'api_server' URL explicitly", @@ -1046,10 +1053,10 @@ var expectedErrors = []struct { errMsg: "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'", }, { filename: "kubernetes_bearertoken_basicauth.bad.yml", - errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured", + errMsg: "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured", }, { filename: "kubernetes_authorization_basicauth.bad.yml", - errMsg: "at most one of basic_auth & authorization must be configured", + errMsg: "at most one of basic_auth, oauth2 & authorization must be configured", }, { filename: "marathon_no_servers.bad.yml", errMsg: "marathon_sd: must contain at least one Marathon server", @@ -1094,7 +1101,7 @@ var expectedErrors = []struct { errMsg: `x-prometheus-remote-write-version is a reserved header. It must not be changed`, }, { filename: "remote_write_authorization_header.bad.yml", - errMsg: `authorization header must be changed via the basic_auth or authorization parameter`, + errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`, }, { filename: "remote_write_url_missing.bad.yml", errMsg: `url for remote_write is empty`, diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 7163abbfe..d0f565d58 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -19,6 +19,11 @@ remote_write: - source_labels: [__name__] regex: expensive.* action: drop + oauth2: + client_id: "123" + client_secret: "456" + token_url: "http://remote1/auth" + - url: http://remote2/push name: rw_tls tls_config: diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index d2bc058cd..72135a270 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -180,6 +180,11 @@ authorization: # configured file. It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configure whether scrape requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -313,6 +318,32 @@ A `tls_config` allows configuring TLS connections. 
[ insecure_skip_verify: ] ``` +### `oauth2` + +OAuth 2.0 authentication using the client credentials grant type. +Prometheus fetches an access token from the specified endpoint with +the given client access and secret keys. + +```yaml +client_id: +[ client_secret: ] + +# Read the client secret from a file. +# It is mutually exclusive with `client_secret`. +[ client_secret_file: ] + +# Scopes for the token request. +scopes: + [ - ... ] + +# The URL to fetch the token from. +token_url: + +# Optional parameters to append to the token URL. +endpoint_params: + [ : ... ] +``` + ### `` Azure SD configurations allow retrieving scrape targets from Azure VMs. @@ -477,6 +508,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Optional proxy URL. [ proxy_url: ] @@ -569,6 +605,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -727,6 +768,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1135,6 +1181,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Optional proxy URL. [ proxy_url: ] @@ -1306,6 +1357,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Optional proxy URL. [ proxy_url: ] @@ -1454,6 +1510,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configure whether HTTP requests follow HTTP 3xx redirects. [ follow_redirects: | default = true ] @@ -1652,6 +1713,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configures the scrape request's TLS settings. tls_config: [ ] @@ -1930,6 +1996,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configures the scrape request's TLS settings. tls_config: [ ] @@ -2075,7 +2146,7 @@ authorization: [ credentials_file: ] # Optionally configures AWS's Signature Verification 4 signing process to -# sign requests. Cannot be set at the same time as basic_auth or authorization. +# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2. # To use the default credentials from the AWS SDK, use `sigv4: {}`. sigv4: # The AWS region. If blank, the region from the default credentials chain @@ -2093,6 +2164,11 @@ sigv4: # AWS Role ARN, an alternative to using AWS API keys. 
[ role_arn: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth, authorization, or sigv4. +oauth2: + [ ] + # Configures the remote write request's TLS settings. tls_config: [ ] @@ -2187,6 +2263,11 @@ authorization: # It is mutually exclusive with `credentials`. [ credentials_file: ] +# Optional OAuth 2.0 configuration. +# Cannot be used at the same time as basic_auth or authorization. +oauth2: + [ ] + # Configures the remote read request's TLS settings. tls_config: [ ] diff --git a/go.mod b/go.mod index b9fa5bdbb..62828efc6 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( github.com/prometheus/alertmanager v0.21.0 github.com/prometheus/client_golang v1.10.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.21.0 + github.com/prometheus/common v0.23.0 github.com/prometheus/exporter-toolkit v0.5.1 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 diff --git a/go.sum b/go.sum index aa7d9f835..5d8b1cb2b 100644 --- a/go.sum +++ b/go.sum @@ -722,8 +722,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.21.0 h1:SMvI2JVldvfUvRVlP64jkIJEC6WiGHJcN2e5tB+ztF8= -github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0 h1:GXWvPYuTUenIa+BhOq/x+L/QZzCqASkVRny5KTlPDGM= +github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/exporter-toolkit v0.5.1 h1:9eqgis5er9xN613ZSADjypCJaDGj9ZlcWBvsIHa8/3c= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= From f3b2d2a99889257022de5923a070e770b8d41b02 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Thu, 29 Apr 2021 00:00:30 +0200 Subject: [PATCH 04/34] Fix config tests in main branch (#8767) The merge of 8761 did not catch that the secrets were off by one because it was not rebased on top of 8693. 
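For illustration, the `oauth2` block introduced by #8761 can be exercised end to end with the config loader, much like the tests above do. This is a minimal sketch: the URLs and credentials are placeholders, and the one-argument `config.Load` signature of this vintage is assumed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/config"
)

// Placeholder remote_write config using the new oauth2 section; Prometheus
// fetches a token from token_url via the client-credentials grant before
// pushing samples.
const cfgText = `
remote_write:
  - url: http://remote1/push
    name: rw_oauth
    oauth2:
      client_id: "123"
      client_secret: "456"
      token_url: "http://remote1/auth"
`

func main() {
	cfg, err := config.Load(cfgText)
	if err != nil {
		log.Fatal(err)
	}
	rw := cfg.RemoteWriteConfigs[0]
	fmt.Println(rw.HTTPClientConfig.OAuth2.TokenURL) // http://remote1/auth
}
```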
Signed-off-by: Julien Pivotto --- config/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/config_test.go b/config/config_test.go index 85e0a78fb..ddc014bc6 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -915,7 +915,7 @@ func TestElideSecrets(t *testing.T) { yamlConfig := string(config) matches := secretRe.FindAllStringIndex(yamlConfig, -1) - require.Equal(t, 13, len(matches), "wrong number of secret matches found") + require.Equal(t, 14, len(matches), "wrong number of secret matches found") require.NotContains(t, yamlConfig, "mysecret", "yaml marshal reveals authentication credentials.") } From fddf4918c0b48a5a0e8916b9de108293f7069a42 Mon Sep 17 00:00:00 2001 From: Nick Triller Date: Wed, 28 Apr 2021 17:00:07 +0200 Subject: [PATCH 05/34] Send empty targetgroup if nothing discovered Signed-off-by: Nick Triller --- discovery/consul/consul.go | 9 +++++++++ discovery/consul/consul_test.go | 17 +++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index ee2734def..fa3d32303 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -426,6 +426,15 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup. } } } + + // Send targetgroup with no targets if nothing was discovered. + if len(services) == 0 { + select { + case <-ctx.Done(): + return + case ch <- []*targetgroup.Group{{}}: + } + } } // consulService contains data belonging to the same service. diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index 0a6b73518..afc387c0b 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -298,6 +298,23 @@ func TestAllServices(t *testing.T) { <-ch } +// targetgroup with no targets is emitted if no services were discovered. +func TestNoTargets(t *testing.T) { + stub, config := newServer(t) + defer stub.Close() + config.ServiceTags = []string{"missing"} + + d := newDiscovery(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + ch := make(chan []*targetgroup.Group) + go d.Run(ctx, ch) + + targets := (<-ch)[0].Targets + require.Equal(t, 0, len(targets)) + cancel() +} + // Watch only the test service. func TestOneService(t *testing.T) { stub, config := newServer(t) From 15d328750aa9a9311ae342b406fa53ff8a4c3fbb Mon Sep 17 00:00:00 2001 From: Nick Triller Date: Wed, 28 Apr 2021 18:06:09 +0200 Subject: [PATCH 06/34] Fix typo in SD docs Signed-off-by: Nick Triller --- discovery/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/discovery/README.md b/discovery/README.md index 9431876c7..ea008af9e 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -262,6 +262,6 @@ Here are some non-obvious parts of adding service discoveries that need to be ve ### Examples of Service Discovery pull requests -The exemples given might become out of date but should give a good impression about the areas touched by a new service discovery. +The examples given might become out of date but should give a good impression about the areas touched by a new service discovery. 
- [Eureka](https://github.com/prometheus/prometheus/pull/3369) From 9d7d818629d2db5a8521be8e3a0ccea0f2482540 Mon Sep 17 00:00:00 2001 From: Hu Shuai Date: Thu, 29 Apr 2021 16:05:33 +0800 Subject: [PATCH 07/34] Fix golint issues caused by typos (#8769) Signed-off-by: Hu Shuai --- discovery/aws/ec2.go | 4 ++-- discovery/aws/lightsail.go | 2 +- discovery/eureka/eureka.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/discovery/aws/ec2.go b/discovery/aws/ec2.go index e2464277a..fadf6f766 100644 --- a/discovery/aws/ec2.go +++ b/discovery/aws/ec2.go @@ -73,7 +73,7 @@ func init() { discovery.RegisterConfig(&EC2SDConfig{}) } -// Filter is the configuration for filtering EC2 instances. +// EC2Filter is the configuration for filtering EC2 instances. type EC2Filter struct { Name string `yaml:"name"` Values []string `yaml:"values"` @@ -128,7 +128,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } -// Discovery periodically performs EC2-SD requests. It implements +// EC2Discovery periodically performs EC2-SD requests. It implements // the Discoverer interface. type EC2Discovery struct { *refresh.Discovery diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go index 8e11a1a4d..57e43ad59 100644 --- a/discovery/aws/lightsail.go +++ b/discovery/aws/lightsail.go @@ -110,7 +110,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err return nil } -// Discovery periodically performs Lightsail-SD requests. It implements +// LightsailDiscovery periodically performs Lightsail-SD requests. It implements // the Discoverer interface. type LightsailDiscovery struct { *refresh.Discovery diff --git a/discovery/eureka/eureka.go b/discovery/eureka/eureka.go index 95b4160ff..80a9c99df 100644 --- a/discovery/eureka/eureka.go +++ b/discovery/eureka/eureka.go @@ -116,7 +116,7 @@ type Discovery struct { server string } -// New creates a new Eureka discovery for the given role. +// NewDiscovery creates a new Eureka discovery for the given role. func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) { rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", config.WithHTTP2Disabled()) if err != nil { From 2efdf660b16be165c09338d528013f59e88a5cf5 Mon Sep 17 00:00:00 2001 From: Goutham Veeramachaneni Date: Thu, 29 Apr 2021 14:28:48 +0200 Subject: [PATCH 08/34] Increase evaluation failures on Commit() (#8770) I think we should increment the metric here, we're setting the rule health anyways. This means even if the "evaluation" suceeded, none of the samples made it to storage. This is a simplified solution to: https://github.com/prometheus/prometheus/pull/8410/ Signed-off-by: Goutham Veeramachaneni --- rules/manager.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rules/manager.go b/rules/manager.go index 3f4ce8d8b..110b64d55 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -595,13 +595,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) + g.metrics.evalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() // Canceled queries are intentional termination of queries. This normally // happens on shutdown and thus we skip logging of any errors here. 
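		// With this change the same failure counter covers both paths: it is
		// bumped here for query errors and again below when Commit fails, so an
		// expression such as
		// increase(prometheus_rule_evaluation_failures_total[5m]) > 0
		// observes either failure mode.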
if _, ok := err.(promql.ErrQueryCanceled); !ok { level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) } - g.metrics.evalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() return } samplesTotal += float64(len(vector)) @@ -620,6 +620,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if err := app.Commit(); err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) + g.metrics.evalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err) return From 23999df27c18e61b328620c79536718d095c87bb Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 30 Apr 2021 13:25:34 -0400 Subject: [PATCH 09/34] expose rule metrics fields Signed-off-by: Owen Diehl --- rules/manager.go | 128 +++++++++++++++++++++++------------------------ 1 file changed, 64 insertions(+), 64 deletions(-) diff --git a/rules/manager.go b/rules/manager.go index 110b64d55..0b38ec187 100644 --- a/rules/manager.go +++ b/rules/manager.go @@ -53,37 +53,37 @@ const namespace = "prometheus" // Metrics for rule evaluation. type Metrics struct { - evalDuration prometheus.Summary - iterationDuration prometheus.Summary - iterationsMissed *prometheus.CounterVec - iterationsScheduled *prometheus.CounterVec - evalTotal *prometheus.CounterVec - evalFailures *prometheus.CounterVec - groupInterval *prometheus.GaugeVec - groupLastEvalTime *prometheus.GaugeVec - groupLastDuration *prometheus.GaugeVec - groupRules *prometheus.GaugeVec - groupSamples *prometheus.GaugeVec + EvalDuration prometheus.Summary + IterationDuration prometheus.Summary + IterationsMissed *prometheus.CounterVec + IterationsScheduled *prometheus.CounterVec + EvalTotal *prometheus.CounterVec + EvalFailures *prometheus.CounterVec + GroupInterval *prometheus.GaugeVec + GroupLastEvalTime *prometheus.GaugeVec + GroupLastDuration *prometheus.GaugeVec + GroupRules *prometheus.GaugeVec + GroupSamples *prometheus.GaugeVec } // NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer, // if not nil. 
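// With the fields exported in this change, a hypothetical caller outside the
// package can build the metrics against its own registry and touch individual
// per-group series directly, e.g.:
//
//	m := rules.NewGroupMetrics(prometheus.NewRegistry())
//	m.EvalFailures.WithLabelValues(rules.GroupKey("rules.yml", "my-group")).Inc()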
func NewGroupMetrics(reg prometheus.Registerer) *Metrics { m := &Metrics{ - evalDuration: prometheus.NewSummary( + EvalDuration: prometheus.NewSummary( prometheus.SummaryOpts{ Namespace: namespace, Name: "rule_evaluation_duration_seconds", Help: "The duration for a rule to execute.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }), - iterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{ + IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{ Namespace: namespace, Name: "rule_group_duration_seconds", Help: "The duration of rule group evaluations.", Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, }), - iterationsMissed: prometheus.NewCounterVec( + IterationsMissed: prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Name: "rule_group_iterations_missed_total", @@ -91,7 +91,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - iterationsScheduled: prometheus.NewCounterVec( + IterationsScheduled: prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Name: "rule_group_iterations_total", @@ -99,7 +99,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - evalTotal: prometheus.NewCounterVec( + EvalTotal: prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Name: "rule_evaluations_total", @@ -107,7 +107,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - evalFailures: prometheus.NewCounterVec( + EvalFailures: prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, Name: "rule_evaluation_failures_total", @@ -115,7 +115,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - groupInterval: prometheus.NewGaugeVec( + GroupInterval: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Name: "rule_group_interval_seconds", @@ -123,7 +123,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - groupLastEvalTime: prometheus.NewGaugeVec( + GroupLastEvalTime: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Name: "rule_group_last_evaluation_timestamp_seconds", @@ -131,7 +131,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - groupLastDuration: prometheus.NewGaugeVec( + GroupLastDuration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Name: "rule_group_last_duration_seconds", @@ -139,7 +139,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - groupRules: prometheus.NewGaugeVec( + GroupRules: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Name: "rule_group_rules", @@ -147,7 +147,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { }, []string{"rule_group"}, ), - groupSamples: prometheus.NewGaugeVec( + GroupSamples: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Name: "rule_group_last_evaluation_samples", @@ -159,17 +159,17 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { if reg != nil { reg.MustRegister( - m.evalDuration, - m.iterationDuration, - m.iterationsMissed, - m.iterationsScheduled, - m.evalTotal, - m.evalFailures, - m.groupInterval, - m.groupLastEvalTime, - m.groupLastDuration, - m.groupRules, - m.groupSamples, + m.EvalDuration, + m.IterationDuration, + m.IterationsMissed, + m.IterationsScheduled, + m.EvalTotal, + m.EvalFailures, + 
m.GroupInterval, + m.GroupLastEvalTime, + m.GroupLastDuration, + m.GroupRules, + m.GroupSamples, ) } @@ -281,15 +281,15 @@ func NewGroup(o GroupOptions) *Group { } key := GroupKey(o.File, o.Name) - metrics.iterationsMissed.WithLabelValues(key) - metrics.iterationsScheduled.WithLabelValues(key) - metrics.evalTotal.WithLabelValues(key) - metrics.evalFailures.WithLabelValues(key) - metrics.groupLastEvalTime.WithLabelValues(key) - metrics.groupLastDuration.WithLabelValues(key) - metrics.groupRules.WithLabelValues(key).Set(float64(len(o.Rules))) - metrics.groupSamples.WithLabelValues(key) - metrics.groupInterval.WithLabelValues(key).Set(o.Interval.Seconds()) + metrics.IterationsMissed.WithLabelValues(key) + metrics.IterationsScheduled.WithLabelValues(key) + metrics.EvalTotal.WithLabelValues(key) + metrics.EvalFailures.WithLabelValues(key) + metrics.GroupLastEvalTime.WithLabelValues(key) + metrics.GroupLastDuration.WithLabelValues(key) + metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules))) + metrics.GroupSamples.WithLabelValues(key) + metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds()) return &Group{ name: o.Name, @@ -338,13 +338,13 @@ func (g *Group) run(ctx context.Context) { }) iter := func() { - g.metrics.iterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() + g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() start := time.Now() g.Eval(ctx, evalTimestamp) timeSinceStart := time.Since(start) - g.metrics.iterationDuration.Observe(timeSinceStart.Seconds()) + g.metrics.IterationDuration.Observe(timeSinceStart.Seconds()) g.setEvaluationTime(timeSinceStart) g.setLastEvaluation(start) } @@ -390,8 +390,8 @@ func (g *Group) run(ctx context.Context) { case <-tick.C: missed := (time.Since(evalTimestamp) / g.interval) - 1 if missed > 0 { - g.metrics.iterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) - g.metrics.iterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) + g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) + g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) } evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) iter() @@ -412,8 +412,8 @@ func (g *Group) run(ctx context.Context) { case <-tick.C: missed := (time.Since(evalTimestamp) / g.interval) - 1 if missed > 0 { - g.metrics.iterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) - g.metrics.iterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) + g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) + g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) } evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) iter() @@ -476,7 +476,7 @@ func (g *Group) GetEvaluationTime() time.Duration { // setEvaluationTime sets the time in seconds the last evaluation took. func (g *Group) setEvaluationTime(dur time.Duration) { - g.metrics.groupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds()) + g.metrics.GroupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds()) g.mtx.Lock() defer g.mtx.Unlock() @@ -492,7 +492,7 @@ func (g *Group) GetLastEvaluation() time.Time { // setLastEvaluation updates evaluationTimestamp to the timestamp of when the rule group was last evaluated. 
func (g *Group) setLastEvaluation(ts time.Time) { - g.metrics.groupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9) + g.metrics.GroupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9) g.mtx.Lock() defer g.mtx.Unlock() @@ -584,18 +584,18 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { sp.Finish() since := time.Since(t) - g.metrics.evalDuration.Observe(since.Seconds()) + g.metrics.EvalDuration.Observe(since.Seconds()) rule.SetEvaluationDuration(since) rule.SetEvaluationTimestamp(t) }(time.Now()) - g.metrics.evalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL) if err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) - g.metrics.evalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() // Canceled queries are intentional termination of queries. This normally // happens on shutdown and thus we skip logging of any errors here. @@ -620,7 +620,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if err := app.Commit(); err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) - g.metrics.evalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err) return @@ -671,7 +671,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { }(i, rule) } if g.metrics != nil { - g.metrics.groupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal) + g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal) } g.cleanupStaleSeries(ctx, ts) } @@ -996,15 +996,15 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels g.markStale = true g.stop() if m := g.metrics; m != nil { - m.iterationsMissed.DeleteLabelValues(n) - m.iterationsScheduled.DeleteLabelValues(n) - m.evalTotal.DeleteLabelValues(n) - m.evalFailures.DeleteLabelValues(n) - m.groupInterval.DeleteLabelValues(n) - m.groupLastEvalTime.DeleteLabelValues(n) - m.groupLastDuration.DeleteLabelValues(n) - m.groupRules.DeleteLabelValues(n) - m.groupSamples.DeleteLabelValues((n)) + m.IterationsMissed.DeleteLabelValues(n) + m.IterationsScheduled.DeleteLabelValues(n) + m.EvalTotal.DeleteLabelValues(n) + m.EvalFailures.DeleteLabelValues(n) + m.GroupInterval.DeleteLabelValues(n) + m.GroupLastEvalTime.DeleteLabelValues(n) + m.GroupLastDuration.DeleteLabelValues(n) + m.GroupRules.DeleteLabelValues(n) + m.GroupSamples.DeleteLabelValues((n)) } wg.Done() }(n, oldg) From 9e8df5ade9b55274fbe0c2b88c2502eccbd54a9b Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Wed, 5 May 2021 11:28:48 -0400 Subject: [PATCH 10/34] check latest exemplar timestamp (#8782) Signed-off-by: yeya24 --- tsdb/exemplar.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index e6e6cb494..8eb34a4c6 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -121,6 +121,9 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label for _, idx := range ce.index { var se exemplar.QueryResult e := ce.exemplars[idx.oldest] + if e.exemplar.Ts > end || ce.exemplars[idx.newest].exemplar.Ts < start { + continue + } if !matchesSomeMatcherSet(e.seriesLabels, matchers) { continue } From 
8f05cd8f9ed90795a189b7aa6874a2db0eb5c96b Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Wed, 5 May 2021 13:51:16 -0400 Subject: [PATCH 11/34] tsdb: move exemplar series labels to index entry (#8783) Signed-off-by: yeya24 --- tsdb/exemplar.go | 21 +++++++++++---------- tsdb/exemplar_test.go | 4 ++-- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 8eb34a4c6..a7d3182ac 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -41,14 +41,15 @@ type CircularExemplarStorage struct { } type indexEntry struct { - oldest int - newest int + oldest int + newest int + seriesLabels labels.Labels } type circularBufferEntry struct { - exemplar exemplar.Exemplar - seriesLabels labels.Labels - next int + exemplar exemplar.Exemplar + next int + ref *indexEntry } // NewCircularExemplarStorage creates a circular in-memory exemplar storage. @@ -124,10 +125,10 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label if e.exemplar.Ts > end || ce.exemplars[idx.newest].exemplar.Ts < start { continue } - if !matchesSomeMatcherSet(e.seriesLabels, matchers) { + if !matchesSomeMatcherSet(idx.seriesLabels, matchers) { continue } - se.SeriesLabels = e.seriesLabels + se.SeriesLabels = idx.seriesLabels // Loop through all exemplars in the circular buffer for the current series. for e.exemplar.Ts <= end { @@ -174,7 +175,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp idx, ok := ce.index[seriesLabels] if !ok { - ce.index[seriesLabels] = &indexEntry{oldest: ce.nextIndex} + ce.index[seriesLabels] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l} } else { // Check for duplicate vs last stored exemplar for this series. // NB these are expected, and appending them is a no-op. @@ -195,7 +196,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp } else { // There exists an exemplar already on this ce.nextIndex entry; drop it to make room // for others. - prevLabels := prev.seriesLabels.String() + prevLabels := prev.ref.seriesLabels.String() if prev.next == -1 { // Last item for this series, remove index entry. delete(ce.index, prevLabels) @@ -208,7 +209,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp // since this is the first exemplar stored for this series. ce.exemplars[ce.nextIndex].exemplar = e ce.exemplars[ce.nextIndex].next = -1 - ce.exemplars[ce.nextIndex].seriesLabels = l + ce.exemplars[ce.nextIndex].ref = ce.index[seriesLabels] ce.index[seriesLabels].newest = ce.nextIndex ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars) diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go index 76976f8be..1c1780d22 100644 --- a/tsdb/exemplar_test.go +++ b/tsdb/exemplar_test.go @@ -304,11 +304,11 @@ func TestIndexOverwrite(t *testing.T) { // index entry for series l1 since we just wrote two exemplars for series l2.
_, ok := es.index[l1.String()] require.False(t, ok) - require.Equal(t, &indexEntry{1, 0}, es.index[l2.String()]) + require.Equal(t, &indexEntry{1, 0, l2}, es.index[l2.String()]) err = es.AddExemplar(l1, exemplar.Exemplar{Value: 4, Ts: 4}) require.NoError(t, err) i := es.index[l2.String()] - require.Equal(t, &indexEntry{0, 0}, i) + require.Equal(t, &indexEntry{0, 0, l2}, i) } From b50f9c1c8486b403c0299e84498eeec73f475e1a Mon Sep 17 00:00:00 2001 From: Damien Grisonnet Date: Thu, 6 May 2021 10:56:21 +0200 Subject: [PATCH 12/34] Add label scrape limits (#8777) * scrape: add label limits per scrape Add three new limits to the scrape configuration to provide some mechanism to defend against an unbounded number of labels and excessive label lengths. If any of these limits are broken by a sample from a scrape, the whole scrape will fail. For all of these configuration options, a zero value means no limit. The `label_limit` configuration will provide a mechanism to bound the number of labels per-scrape of a certain sample to a user-defined limit. This limit will be tested against the sample labels plus the discovery labels, but it will exclude __name__ from the count since it is a mandatory Prometheus label for which constraints aren't meaningful. The `label_name_length_limit` and `label_value_length_limit` will prevent having labels of excessive lengths. These limits also skip the __name__ label for the same reasons as the `label_limit` option and will also make the scrape fail if any sample has a label name/value length that exceeds the predefined limits. Signed-off-by: Damien Grisonnet * scrape: add metrics and alert to label limits Add three gauges, one for each label limit, to easily access the limit set by a certain scrape target. Also add a counter to count the number of targets that exceeded the label limits and thus were dropped. This is useful for the `PrometheusLabelLimitHit` alert that will notify users that scraping some targets failed because they had samples exceeding the label limits defined in the scrape configuration. Signed-off-by: Damien Grisonnet * scrape: apply label limits to __name__ label Apply limits to the __name__ label that was previously skipped and truncate the label names and values in the error messages as they can be very long. Signed-off-by: Damien Grisonnet * scrape: remove label limits gauges and refactor Remove `prometheus_target_scrape_pool_label_limit`, `prometheus_target_scrape_pool_label_name_length_limit`, and `prometheus_target_scrape_pool_label_value_length_limit` as they are not really useful since we don't have the information about the labels in them. Signed-off-by: Damien Grisonnet --- config/config.go | 12 +- docs/configuration/configuration.md | 15 ++ .../prometheus-mixin/alerts.libsonnet | 14 ++ scrape/scrape.go | 98 +++++++++++-- scrape/scrape_test.go | 133 +++++++++++++++++- 5 files changed, 257 insertions(+), 15 deletions(-) diff --git a/config/config.go b/config/config.go index 56350e1be..e4045eb05 100644 --- a/config/config.go +++ b/config/config.go @@ -382,11 +382,21 @@ type ScrapeConfig struct { MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. Scheme string `yaml:"scheme,omitempty"` - // More than this many samples post metric-relabeling will cause the scrape to fail. + // More than this many samples post metric-relabeling will cause the scrape to + // fail.
SampleLimit uint `yaml:"sample_limit,omitempty"` // More than this many targets after the target relabeling will cause the // scrapes to fail. TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. + LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 72135a270..ac45fa82f 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -288,6 +288,21 @@ metric_relabel_configs: # the entire scrape will be treated as failed. 0 means no limit. [ sample_limit: <int> | default = 0 ] +# Per-scrape limit on number of labels that will be accepted for a sample. If +# more than this number of labels are present post metric-relabeling, the +# entire scrape will be treated as failed. 0 means no limit. +[ label_limit: <int> | default = 0 ] + +# Per-scrape limit on the length of label names that will be accepted for a sample. +# If a label name is longer than this number post metric-relabeling, the entire +# scrape will be treated as failed. 0 means no limit. +[ label_name_length_limit: <int> | default = 0 ] + +# Per-scrape limit on the length of label values that will be accepted for a sample. +# If a label value is longer than this number post metric-relabeling, the +# entire scrape will be treated as failed. 0 means no limit. +[ label_value_length_limit: <int> | default = 0 ] + # Per-scrape config limit on number of unique targets that will be # accepted. If more than this number of targets are present after target # relabeling, Prometheus will mark the targets as failed without scraping them. diff --git a/documentation/prometheus-mixin/alerts.libsonnet b/documentation/prometheus-mixin/alerts.libsonnet index f555a4cdf..d4face577 100644 --- a/documentation/prometheus-mixin/alerts.libsonnet +++ b/documentation/prometheus-mixin/alerts.libsonnet @@ -261,6 +261,20 @@ description: 'Prometheus %(prometheusName)s has dropped {{ printf "%%.0f" $value }} targets because the number of targets exceeded the configured target_limit.' % $._config, }, }, + { + alert: 'PrometheusLabelLimitHit', + expr: ||| + increase(prometheus_target_scrape_pool_exceeded_label_limits_total{%(prometheusSelector)s}[5m]) > 0 + ||| % $._config, + 'for': '15m', + labels: { + severity: 'warning', + }, + annotations: { + summary: 'Prometheus has dropped targets because some scrape configs have exceeded the labels limit.', + description: 'Prometheus %(prometheusName)s has dropped {{ printf "%%.0f" $value }} targets because some samples exceeded the configured label_limit, label_name_length_limit or label_value_length_limit.'
% $._config, + }, + }, ] + if $._config.prometheusHAGroupLabels == '' then self.rulesWithoutHA else self.rulesWithHA, rulesWithoutHA:: [ { diff --git a/scrape/scrape.go b/scrape/scrape.go index f7888f3ee..0985d2f46 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -170,6 +170,12 @@ var ( Help: "Total number of exemplar rejected due to not being out of the expected order.", }, ) + targetScrapePoolExceededLabelLimits = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_exceeded_label_limits_total", + Help: "Total number of times scrape pools hit the label limits, during sync or config reload.", + }, + ) ) func init() { @@ -192,6 +198,7 @@ func init() { targetScrapeCacheFlushForced, targetMetadataCache, targetScrapeExemplarOutOfOrder, + targetScrapePoolExceededLabelLimits, ) } @@ -218,10 +225,17 @@ type scrapePool struct { newLoop func(scrapeLoopOptions) loop } +type labelLimits struct { + labelLimit int + labelNameLengthLimit int + labelValueLengthLimit int +} + type scrapeLoopOptions struct { target *Target scraper scraper - limit int + sampleLimit int + labelLimits *labelLimits honorLabels bool honorTimestamps bool mrc []*relabel.Config @@ -273,10 +287,11 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) }, func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, - func(ctx context.Context) storage.Appender { return appender(app.Appender(ctx), opts.limit) }, + func(ctx context.Context) storage.Appender { return appender(app.Appender(ctx), opts.sampleLimit) }, cache, jitterSeed, opts.honorTimestamps, + opts.labelLimits, ) } @@ -357,10 +372,15 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) var ( - wg sync.WaitGroup - interval = time.Duration(sp.config.ScrapeInterval) - timeout = time.Duration(sp.config.ScrapeTimeout) - limit = int(sp.config.SampleLimit) + wg sync.WaitGroup + interval = time.Duration(sp.config.ScrapeInterval) + timeout = time.Duration(sp.config.ScrapeTimeout) + sampleLimit = int(sp.config.SampleLimit) + labelLimits = &labelLimits{ + labelLimit: int(sp.config.LabelLimit), + labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), + labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), + } honorLabels = sp.config.HonorLabels honorTimestamps = sp.config.HonorTimestamps mrc = sp.config.MetricRelabelConfigs @@ -383,7 +403,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { newLoop = sp.newLoop(scrapeLoopOptions{ target: t, scraper: s, - limit: limit, + sampleLimit: sampleLimit, + labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, mrc: mrc, @@ -451,10 +472,15 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { // It returns after all stopped scrape loops terminated. 
func (sp *scrapePool) sync(targets []*Target) { var ( - uniqueLoops = make(map[uint64]loop) - interval = time.Duration(sp.config.ScrapeInterval) - timeout = time.Duration(sp.config.ScrapeTimeout) - limit = int(sp.config.SampleLimit) + uniqueLoops = make(map[uint64]loop) + interval = time.Duration(sp.config.ScrapeInterval) + timeout = time.Duration(sp.config.ScrapeTimeout) + sampleLimit = int(sp.config.SampleLimit) + labelLimits = &labelLimits{ + labelLimit: int(sp.config.LabelLimit), + labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), + labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), + } honorLabels = sp.config.HonorLabels honorTimestamps = sp.config.HonorTimestamps mrc = sp.config.MetricRelabelConfigs @@ -469,7 +495,8 @@ func (sp *scrapePool) sync(targets []*Target) { l := sp.newLoop(scrapeLoopOptions{ target: t, scraper: s, - limit: limit, + sampleLimit: sampleLimit, + labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, mrc: mrc, @@ -544,6 +571,41 @@ func (sp *scrapePool) refreshTargetLimitErr() error { return err } +func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { + if limits == nil { + return nil + } + + met := lset.Get(labels.MetricName) + if limits.labelLimit > 0 { + nbLabels := len(lset) + if nbLabels > int(limits.labelLimit) { + return fmt.Errorf("label_limit exceeded (metric: %.50s, number of label: %d, limit: %d)", met, nbLabels, limits.labelLimit) + } + } + + if limits.labelNameLengthLimit == 0 && limits.labelValueLengthLimit == 0 { + return nil + } + + for _, l := range lset { + if limits.labelNameLengthLimit > 0 { + nameLength := len(l.Name) + if nameLength > int(limits.labelNameLengthLimit) { + return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label: %.50v, name length: %d, limit: %d)", met, l, nameLength, limits.labelNameLengthLimit) + } + } + + if limits.labelValueLengthLimit > 0 { + valueLength := len(l.Value) + if valueLength > int(limits.labelValueLengthLimit) { + return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label: %.50v, value length: %d, limit: %d)", met, l, valueLength, limits.labelValueLengthLimit) + } + } + } + return nil +} + func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { lb := labels.NewBuilder(lset) @@ -707,6 +769,7 @@ type scrapeLoop struct { honorTimestamps bool forcedErr error forcedErrMtx sync.Mutex + labelLimits *labelLimits appender func(ctx context.Context) storage.Appender sampleMutator labelsMutator @@ -974,6 +1037,7 @@ func newScrapeLoop(ctx context.Context, cache *scrapeCache, jitterSeed uint64, honorTimestamps bool, + labelLimits *labelLimits, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -996,6 +1060,7 @@ func newScrapeLoop(ctx context.Context, l: l, parentCtx: ctx, honorTimestamps: honorTimestamps, + labelLimits: labelLimits, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1346,6 +1411,12 @@ loop: err = errNameLabelMandatory break loop } + + // If any label limit is exceeded, the scrape should fail.
+ if err = verifyLabelLimits(lset, sl.labelLimits); err != nil { + targetScrapePoolExceededLabelLimits.Inc() + break loop + } } ref, err = app.Append(ref, lset, t, v) @@ -1577,6 +1648,9 @@ func zeroConfig(c *config.ScrapeConfig) *config.ScrapeConfig { z.ScrapeInterval = 0 z.ScrapeTimeout = 0 z.SampleLimit = 0 + z.LabelLimit = 0 + z.LabelNameLengthLimit = 0 + z.LabelValueLengthLimit = 0 z.HTTPClientConfig = config_util.HTTPClientConfig{} return &z } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index f21d10157..93877035e 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -464,8 +464,8 @@ func TestScrapePoolAppender(t *testing.T) { require.True(t, ok, "Expected base appender but got %T", tl.Appender) loop = sp.newLoop(scrapeLoopOptions{ - target: &Target{}, - limit: 100, + target: &Target{}, + sampleLimit: 100, }) appl, ok = loop.(*scrapeLoop) require.True(t, ok, "Expected scrapeLoop but got %T", loop) @@ -577,6 +577,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) { nopMutator, nil, nil, 0, true, + nil, ) // The scrape pool synchronizes on stopping scrape loops. However, new scrape @@ -641,6 +642,7 @@ func TestScrapeLoopStop(t *testing.T) { nil, 0, true, + nil, ) // Terminate loop after 2 scrapes. @@ -708,6 +710,7 @@ func TestScrapeLoopRun(t *testing.T) { nil, 0, true, + nil, ) // The loop must terminate during the initial offset if the context @@ -755,6 +758,7 @@ func TestScrapeLoopRun(t *testing.T) { nil, 0, true, + nil, ) go func() { @@ -806,6 +810,7 @@ func TestScrapeLoopForcedErr(t *testing.T) { nil, 0, true, + nil, ) forcedErr := fmt.Errorf("forced err") @@ -856,6 +861,7 @@ func TestScrapeLoopMetadata(t *testing.T) { cache, 0, true, + nil, ) defer cancel() @@ -905,6 +911,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) { nil, 0, true, + nil, ) defer cancel() @@ -943,6 +950,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) { nil, 0, true, + nil, ) // Succeed once, several failures, then stop. numScrapes := 0 @@ -997,6 +1005,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) { nil, 0, true, + nil, ) // Succeed once, several failures, then stop. @@ -1055,6 +1064,7 @@ func TestScrapeLoopCache(t *testing.T) { nil, 0, true, + nil, ) numScrapes := 0 @@ -1129,6 +1139,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) { nil, 0, true, + nil, ) numScrapes := 0 @@ -1235,6 +1246,7 @@ func TestScrapeLoopAppend(t *testing.T) { nil, 0, true, + nil, ) now := time.Now() @@ -1276,6 +1288,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) { nil, 0, true, + nil, ) fakeRef := uint64(1) @@ -1325,6 +1338,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) { nil, 0, true, + nil, ) // Get the value of the Counter before performing the append. 
@@ -1394,6 +1408,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) { nil, 0, true, + nil, ) now := time.Now() @@ -1434,6 +1449,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) { nil, 0, true, + nil, ) now := time.Now() @@ -1477,6 +1493,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) { nil, 0, true, + nil, ) now := time.Now() @@ -1578,6 +1595,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000 nil, 0, true, + nil, ) now := time.Now() @@ -1635,6 +1653,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) { nil, 0, true, + nil, ) now := time.Now() @@ -1679,6 +1698,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) { nil, 0, true, + nil, ) scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { @@ -1707,6 +1727,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) { nil, 0, true, + nil, ) scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { @@ -1748,6 +1769,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T nil, 0, true, + nil, ) now := time.Unix(1, 0) @@ -1785,6 +1807,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) { nil, 0, true, + nil, ) now := time.Now().Add(20 * time.Minute) @@ -1972,6 +1995,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) { func(ctx context.Context) storage.Appender { return capp }, nil, 0, true, + nil, ) now := time.Now() @@ -2005,6 +2029,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) { func(ctx context.Context) storage.Appender { return capp }, nil, 0, false, + nil, ) now := time.Now() @@ -2037,6 +2062,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) { nil, 0, true, + nil, ) defer cancel() @@ -2087,6 +2113,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) { nil, 0, true, + nil, ) defer cancel() @@ -2304,6 +2331,7 @@ func TestScrapeAddFast(t *testing.T) { nil, 0, true, + nil, ) defer cancel() @@ -2387,6 +2415,7 @@ func TestScrapeReportSingleAppender(t *testing.T) { nil, 0, true, + nil, ) numScrapes := 0 @@ -2430,3 +2459,103 @@ func TestScrapeReportSingleAppender(t *testing.T) { t.Fatalf("Scrape wasn't stopped.") } } + +func TestScrapeLoopLabelLimit(t *testing.T) { + tests := []struct { + title string + scrapeLabels string + discoveryLabels []string + labelLimits labelLimits + expectErr bool + }{ + { + title: "Valid number of labels", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelLimit: 5}, + expectErr: false, + }, { + title: "Too many labels", + scrapeLabels: `metric{l1="1", l2="2", l3="3", l4="4", l5="5", l6="6"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelLimit: 5}, + expectErr: true, + }, { + title: "Too many labels including discovery labels", + scrapeLabels: `metric{l1="1", l2="2", l3="3", l4="4"} 0`, + discoveryLabels: []string{"l5", "5", "l6", "6"}, + labelLimits: labelLimits{labelLimit: 5}, + expectErr: true, + }, { + title: "Valid labels name length", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelNameLengthLimit: 10}, + expectErr: false, + }, { + title: "Label name too long", + scrapeLabels: `metric{label_name_too_long="0"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelNameLengthLimit: 10}, + expectErr: true, + }, { + title: "Discovery label name too long", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: []string{"label_name_too_long", "0"}, + labelLimits: labelLimits{labelNameLengthLimit: 10}, + expectErr: true, + }, { + title: 
"Valid labels value length", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelValueLengthLimit: 10}, + expectErr: false, + }, { + title: "Label value too long", + scrapeLabels: `metric{l1="label_value_too_long"} 0`, + discoveryLabels: nil, + labelLimits: labelLimits{labelValueLengthLimit: 10}, + expectErr: true, + }, { + title: "Discovery label value too long", + scrapeLabels: `metric{l1="1", l2="2"} 0`, + discoveryLabels: []string{"l1", "label_value_too_long"}, + labelLimits: labelLimits{labelValueLengthLimit: 10}, + expectErr: true, + }, + } + + for _, test := range tests { + app := &collectResultAppender{} + + discoveryLabels := &Target{ + labels: labels.FromStrings(test.discoveryLabels...), + } + + sl := newScrapeLoop(context.Background(), + nil, nil, nil, + func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, discoveryLabels, false, nil) + }, + func(l labels.Labels) labels.Labels { + return mutateReportSampleLabels(l, discoveryLabels) + }, + func(ctx context.Context) storage.Appender { return app }, + nil, + 0, + true, + &test.labelLimits, + ) + + slApp := sl.appender(context.Background()) + _, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now()) + + t.Logf("Test:%s", test.title) + if test.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.NoError(t, slApp.Commit()) + } + } +} From 7c7dafc32135683082f22d1589acdc92c7b0ec33 Mon Sep 17 00:00:00 2001 From: Chris Marchbanks Date: Thu, 6 May 2021 12:56:45 -0600 Subject: [PATCH 13/34] Do not snappy encode if record is too large (#8790) Snappy cannot encode records larger than ~3.7 GB and will panic if an encoding is attempted. Check to make sure that the record is smaller than this before encoding. In the future, we could improve this behavior to still compress large records (or break them up into smaller records), but this avoids the panic for users with very large single scrape targets. Signed-off-by: Chris Marchbanks --- tsdb/wal/wal.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tsdb/wal/wal.go b/tsdb/wal/wal.go index 7b45e0e91..ba82251a3 100644 --- a/tsdb/wal/wal.go +++ b/tsdb/wal/wal.go @@ -616,7 +616,10 @@ func (w *WAL) log(rec []byte, final bool) error { // Compress the record before calculating if a new segment is needed. compressed := false - if w.compress && len(rec) > 0 { + if w.compress && + len(rec) > 0 && + // If MaxEncodedLen is less than 0 the record is too large to be compressed. + snappy.MaxEncodedLen(len(rec)) >= 0 { // The snappy library uses `len` to calculate if we need a new buffer. // In order to allocate as few buffers as possible make the length // equal to the capacity. 
From 45c7c51a3b8d1e51242fdb789261bf0efcfa99d6 Mon Sep 17 00:00:00 2001 From: Chris Marchbanks Date: Thu, 6 May 2021 13:15:16 -0600 Subject: [PATCH 14/34] Update go dependencies Signed-off-by: Chris Marchbanks --- go.mod | 32 +++++++++--------- go.sum | 104 +++++++++++++++++++++++++++++++++++---------------------- 2 files changed, 81 insertions(+), 55 deletions(-) diff --git a/go.mod b/go.mod index 62828efc6..dd22c9be7 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/prometheus/prometheus go 1.14 require ( - github.com/Azure/azure-sdk-for-go v52.5.0+incompatible + github.com/Azure/azure-sdk-for-go v54.0.0+incompatible github.com/Azure/go-autorest/autorest v0.11.18 github.com/Azure/go-autorest/autorest/adal v0.9.13 github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect @@ -11,29 +11,29 @@ require ( github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect github.com/Microsoft/go-winio v0.4.16 // indirect github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 - github.com/aws/aws-sdk-go v1.38.3 + github.com/aws/aws-sdk-go v1.38.35 github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/containerd v1.4.3 // indirect github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 - github.com/digitalocean/godo v1.58.0 + github.com/digitalocean/godo v1.60.0 github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v20.10.5+incompatible + github.com/docker/docker v20.10.6+incompatible github.com/docker/go-connections v0.4.0 // indirect github.com/edsrzf/mmap-go v1.0.0 github.com/go-kit/kit v0.10.0 github.com/go-logfmt/logfmt v0.5.0 - github.com/go-openapi/strfmt v0.20.0 + github.com/go-openapi/strfmt v0.20.1 github.com/go-openapi/validate v0.20.2 // indirect github.com/go-zookeeper/zk v1.0.2 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.3 - github.com/google/pprof v0.0.0-20210323184331-8eee2492667d - github.com/gophercloud/gophercloud v0.16.0 + github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10 + github.com/gophercloud/gophercloud v0.17.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/consul/api v1.8.1 - github.com/hetznercloud/hcloud-go v1.24.0 - github.com/influxdata/influxdb v1.8.4 - github.com/json-iterator/go v1.1.10 + github.com/hetznercloud/hcloud-go v1.25.0 + github.com/influxdata/influxdb v1.8.5 + github.com/json-iterator/go v1.1.11 github.com/miekg/dns v1.1.41 github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect github.com/morikuni/aec v1.0.0 // indirect @@ -54,17 +54,17 @@ require ( github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 github.com/stretchr/testify v1.7.0 - github.com/uber/jaeger-client-go v2.25.0+incompatible - github.com/uber/jaeger-lib v2.4.0+incompatible + github.com/uber/jaeger-client-go v2.28.0+incompatible + github.com/uber/jaeger-lib v2.4.1+incompatible go.uber.org/atomic v1.7.0 go.uber.org/goleak v1.1.10 - golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826 - golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 + golang.org/x/net v0.0.0-20210505214959-0714010a04ed + golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210324051608-47abb6519492 + golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba golang.org/x/tools v0.1.0 - google.golang.org/api v0.42.0 + google.golang.org/api v0.46.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 5d8b1cb2b..03a0abd29 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,9 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0 h1:oqqswrt4x6b9OGBnNqdssxBl1xf0rSUNjU2BR4BZar0= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -39,8 +40,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA= -github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v54.0.0+incompatible h1:Bq3L9LF0DHCexlT0fccwxgrOMfjHx8LGz+d+L7gGQv4= +github.com/Azure/azure-sdk-for-go v54.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -113,8 +114,8 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k= -github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.35 h1:7AlAO0FC+8nFjxiGKEmq0QLpiA8/XFr6eIxgRTwkdTg= +github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -157,17 +158,18 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go 
v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digitalocean/godo v1.58.0 h1:Iy8ULTvgCAxH8dlxZ54qRYpm5uTEb2deUqijywLH7Lo= -github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.60.0 h1:o/vimtn/HKtYSakFAAZ59Zc5ASORd41S4z1X7pAXPn8= +github.com/digitalocean/godo v1.60.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.5+incompatible h1:o5WL5onN4awYGwrW7+oTn5x9AF2prw7V0Ox8ZEkoCdg= -github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ= +github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -189,6 +191,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -289,8 +292,9 @@ github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6 github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc= +github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= 
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -380,8 +384,11 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= @@ -421,8 +428,8 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210323184331-8eee2492667d h1:Rwivyny4wymF1qWzOk800eSVa/n9njfdOm+kHjiQhZQ= -github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10 h1:wAh7XxYU1O92WP9JMsK0elU+haxEN0HTc2m/C89wQvk= +github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -433,8 +440,8 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU= -github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.17.0 h1:BgVw0saxyeHWH5us/SQe1ltp0GRnytjmOLXDA8pO77E= +github.com/gophercloud/gophercloud v0.17.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod 
h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -491,8 +498,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hetznercloud/hcloud-go v1.24.0 h1:/CeHDzhH3Fhm83pjxvE3xNNLbvACl0Lu1/auJ83gG5U= -github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= +github.com/hetznercloud/hcloud-go v1.25.0 h1:QAaFKtGKWRxjwjKJWBGMxGYUxVEQmIkb35j/WXrsazY= +github.com/hetznercloud/hcloud-go v1.25.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= @@ -501,8 +508,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= -github.com/influxdata/influxdb v1.8.4 h1:FUcPJJ1/sM47gX3Xr7QCIbkmZ2N4ug5CV7iOFQCR75A= -github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb v1.8.5 h1:MMrJF6eWCD7sWMxOFFctm1U1RNQ2Hh2nHgz2iF9/wHY= +github.com/influxdata/influxdb v1.8.5/go.mod h1:oFH+pbEyDln/1TKwa98oJzVrkZwdjrJOwIDGYZj7Ma0= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -526,8 +533,9 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -802,19 +810,23 @@ github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= -github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ= -github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI= +github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -828,8 +840,9 @@ go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6 h1:rh7GdYmDrb8AQSkF8yteAus8qYOgOASWDOv1BWqBXkU= go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -865,6 +878,7 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U 
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -957,8 +971,10 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826 h1:lNRDRnwZWawoPHDS50ebYHTOHjctRMLSrUSQFcAHiW4= -golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210505214959-0714010a04ed h1:V9kAVxLvz1lkufatrpHuUVyJ/5tR3Ms7rk951P4mI98= +golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -970,8 +986,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 h1:D7nTwh4J0i+5mW4Zjzn5omvlr6YBcWywE6KOcatyNxY= -golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c h1:SgVl/sCtkicsS7psKkje4H9YtjdEl3xsYh7N+5TDHqY= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1050,10 +1066,12 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c= +golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -1064,8 +1082,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1170,8 +1189,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc= -google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU= +google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1220,8 +1240,10 @@ 
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f h1:YRBxgxUW6GFi+AKsn8WGA9k1SZohK+gGuEqdeT5aoNQ= -google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab h1:dkb90hr43A2Q5as5ZBphcOF2II0+EqfCBqGp7qFSpN4= +google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1243,8 +1265,10 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1254,8 +1278,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 063ab7555d7c8afbaac8efd779638dff3e868615 Mon Sep 17 00:00:00 2001 From: Chris Marchbanks Date: Thu, 6 May 2021 13:16:06 -0600 Subject: [PATCH 15/34] Update javascript dependencies Signed-off-by: Chris Marchbanks --- web/ui/react-app/package.json | 2 +- web/ui/react-app/yarn.lock | 951 +++++++++++++++++----------------- 2 files changed, 489 insertions(+), 464 deletions(-) diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 12b61aa84..d46aeaa5d 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -32,7 +32,6 @@ "jsdom": "^16.4.0", "moment": "^2.24.0", "moment-timezone": "^0.5.23", - "sass": "1.32.10", "popper.js": "^1.14.3", "react": "^16.7.0", "react-copy-to-clipboard": "^5.0.1", @@ -42,6 +41,7 @@ "react-test-renderer": "^16.9.0", "reactstrap": "^8.9.0", "sanitize-html": "^2.3.3", + "sass": "1.32.10", "tempusdominus-bootstrap-4": "^5.1.2", "tempusdominus-core": "^5.0.3", "typescript": "^3.3.3", diff --git a/web/ui/react-app/yarn.lock b/web/ui/react-app/yarn.lock index d79fbe416..2212719c9 100644 --- a/web/ui/react-app/yarn.lock +++ b/web/ui/react-app/yarn.lock @@ -16,10 +16,10 @@ dependencies: "@babel/highlight" "^7.12.13" -"@babel/compat-data@^7.13.0", "@babel/compat-data@^7.13.12", "@babel/compat-data@^7.13.8", "@babel/compat-data@^7.9.0": - version "7.13.12" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.13.12.tgz#a8a5ccac19c200f9dd49624cac6e19d7be1236a1" - integrity sha512-3eJJ841uKxeV8dcN/2yGEUy+RfgQspPEgQat85umsE1rotuquQ2AbIub4S6j7c50a2d+4myc+zSlnXeIHrOnhQ== +"@babel/compat-data@^7.13.11", "@babel/compat-data@^7.13.15", "@babel/compat-data@^7.13.8", "@babel/compat-data@^7.14.0", "@babel/compat-data@^7.9.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.14.0.tgz#a901128bce2ad02565df95e6ecbf195cf9465919" + integrity sha512-vu9V3uMM/1o5Hl5OekMUowo3FqXLJSw+s+66nt0fSWVWTtmosdzn45JHOB3cPtZoe6CTBDzvSw0RdOY85Q37+Q== "@babel/core@7.9.0": version "7.9.0" @@ -44,33 +44,32 @@ source-map "^0.5.0" "@babel/core@^7.1.0", "@babel/core@^7.4.5": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.13.10.tgz#07de050bbd8193fcd8a3c27918c0890613a94559" - integrity sha512-bfIYcT0BdKeAZrovpMqX2Mx5NrgAckGbwT982AkdS5GNfn3KMGiprlBAtmBcFZRUmpaufS6WZFP8trvx8ptFDw== + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.14.0.tgz#47299ff3ec8d111b493f1a9d04bf88c04e728d88" + integrity sha512-8YqpRig5NmIHlMLw09zMlPTvUVMILjqCOtVgu+TVNWEBvy9b5I3RRyhqnrV4hjgEK7n8P9OqvkWJAFmEL6Wwfw== dependencies: "@babel/code-frame" "^7.12.13" - "@babel/generator" "^7.13.9" - "@babel/helper-compilation-targets" "^7.13.10" - "@babel/helper-module-transforms" "^7.13.0" - "@babel/helpers" "^7.13.10" - "@babel/parser" "^7.13.10" + "@babel/generator" "^7.14.0" + "@babel/helper-compilation-targets" "^7.13.16" + "@babel/helper-module-transforms" "^7.14.0" + "@babel/helpers" "^7.14.0" + "@babel/parser" "^7.14.0" "@babel/template" "^7.12.13" - "@babel/traverse" "^7.13.0" - "@babel/types" "^7.13.0" + "@babel/traverse" "^7.14.0" + "@babel/types" "^7.14.0" convert-source-map "^1.7.0" debug "^4.1.0" gensync "^1.0.0-beta.2" json5 "^2.1.2" - lodash "^4.17.19" semver "^6.3.0" source-map "^0.5.0" -"@babel/generator@^7.13.0", "@babel/generator@^7.13.9", "@babel/generator@^7.4.0", "@babel/generator@^7.9.0": - version "7.13.9" - resolved 
"https://registry.yarnpkg.com/@babel/generator/-/generator-7.13.9.tgz#3a7aa96f9efb8e2be42d38d80e2ceb4c64d8de39" - integrity sha512-mHOOmY0Axl/JCTkxTU6Lf5sWOg/v8nUa+Xkt4zMTftX0wqmb6Sh7J8gvcehBw7q0AhrhAR+FDacKjCZ2X8K+Sw== +"@babel/generator@^7.14.0", "@babel/generator@^7.4.0", "@babel/generator@^7.9.0": + version "7.14.1" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.14.1.tgz#1f99331babd65700183628da186f36f63d615c93" + integrity sha512-TMGhsXMXCP/O1WtQmZjpEYDhCYC9vFhayWZPJSZCGkPJgUqX0rF0wwtrYvnzVxIjcF80tkUertXVk5cwqi5cAQ== dependencies: - "@babel/types" "^7.13.0" + "@babel/types" "^7.14.1" jsesc "^2.5.1" source-map "^0.5.0" @@ -89,25 +88,26 @@ "@babel/helper-explode-assignable-expression" "^7.12.13" "@babel/types" "^7.12.13" -"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.13.10", "@babel/helper-compilation-targets@^7.13.8", "@babel/helper-compilation-targets@^7.8.7": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.10.tgz#1310a1678cb8427c07a753750da4f8ce442bdd0c" - integrity sha512-/Xju7Qg1GQO4mHZ/Kcs6Au7gfafgZnwm+a7sy/ow/tV1sHeraRUHbjdat8/UvDor4Tez+siGKDk6zIKtCPKVJA== +"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.13.16", "@babel/helper-compilation-targets@^7.13.8", "@babel/helper-compilation-targets@^7.8.7": + version "7.13.16" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.16.tgz#6e91dccf15e3f43e5556dffe32d860109887563c" + integrity sha512-3gmkYIrpqsLlieFwjkGgLaSHmhnvlAYzZLlYVjlW+QwI+1zE17kGxuJGmIqDQdYp56XdmGeD+Bswx0UTyG18xA== dependencies: - "@babel/compat-data" "^7.13.8" + "@babel/compat-data" "^7.13.15" "@babel/helper-validator-option" "^7.12.17" browserslist "^4.14.5" semver "^6.3.0" -"@babel/helper-create-class-features-plugin@^7.13.0", "@babel/helper-create-class-features-plugin@^7.8.3": - version "7.13.11" - resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz#30d30a005bca2c953f5653fc25091a492177f4f6" - integrity sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw== +"@babel/helper-create-class-features-plugin@^7.13.0", "@babel/helper-create-class-features-plugin@^7.14.0", "@babel/helper-create-class-features-plugin@^7.8.3": + version "7.14.1" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.14.1.tgz#1fe11b376f3c41650ad9fedc665b0068722ea76c" + integrity sha512-r8rsUahG4ywm0QpGcCrLaUSOuNAISR3IZCg4Fx05Ozq31aCUrQsTLH6KPxy0N5ULoQ4Sn9qjNdGNtbPWAC6hYg== dependencies: + "@babel/helper-annotate-as-pure" "^7.12.13" "@babel/helper-function-name" "^7.12.13" - "@babel/helper-member-expression-to-functions" "^7.13.0" + "@babel/helper-member-expression-to-functions" "^7.13.12" "@babel/helper-optimise-call-expression" "^7.12.13" - "@babel/helper-replace-supers" "^7.13.0" + "@babel/helper-replace-supers" "^7.13.12" "@babel/helper-split-export-declaration" "^7.12.13" "@babel/helper-create-regexp-features-plugin@^7.12.13": @@ -118,10 +118,10 @@ "@babel/helper-annotate-as-pure" "^7.12.13" regexpu-core "^4.7.1" -"@babel/helper-define-polyfill-provider@^0.1.5": - version "0.1.5" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.1.5.tgz#3c2f91b7971b9fc11fe779c945c014065dea340e" - integrity 
sha512-nXuzCSwlJ/WKr8qxzW816gwyT6VZgiJG17zR40fou70yfAcqjoNyTLl/DQ+FExw5Hx5KNqshmN8Ldl/r2N7cTg== +"@babel/helper-define-polyfill-provider@^0.2.0": + version "0.2.0" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz#a640051772045fedaaecc6f0c6c69f02bdd34bf1" + integrity sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw== dependencies: "@babel/helper-compilation-targets" "^7.13.0" "@babel/helper-module-imports" "^7.12.13" @@ -156,14 +156,14 @@ "@babel/types" "^7.12.13" "@babel/helper-hoist-variables@^7.13.0": - version "7.13.0" - resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.0.tgz#5d5882e855b5c5eda91e0cadc26c6e7a2c8593d8" - integrity sha512-0kBzvXiIKfsCA0y6cFEIJf4OdzfpRuNk4+YTeHZpGGc666SATFKTz6sRncwFnQk7/ugJ4dSrCj6iJuvW4Qwr2g== + version "7.13.16" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.16.tgz#1b1651249e94b51f8f0d33439843e33e39775b30" + integrity sha512-1eMtTrXtrwscjcAeO4BVK+vvkxaLJSPFz1w1KLawz6HLNi9bPFGBNwwDyVfiu1Tv/vRRFYfoGaKhmAQPGPn5Wg== dependencies: - "@babel/traverse" "^7.13.0" - "@babel/types" "^7.13.0" + "@babel/traverse" "^7.13.15" + "@babel/types" "^7.13.16" -"@babel/helper-member-expression-to-functions@^7.13.0", "@babel/helper-member-expression-to-functions@^7.13.12": +"@babel/helper-member-expression-to-functions@^7.13.12": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz#dfe368f26d426a07299d8d6513821768216e6d72" integrity sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw== @@ -177,19 +177,19 @@ dependencies: "@babel/types" "^7.13.12" -"@babel/helper-module-transforms@^7.13.0", "@babel/helper-module-transforms@^7.9.0": - version "7.13.12" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.13.12.tgz#600e58350490828d82282631a1422268e982ba96" - integrity sha512-7zVQqMO3V+K4JOOj40kxiCrMf6xlQAkewBB0eu2b03OO/Q21ZutOzjpfD79A5gtE/2OWi1nv625MrDlGlkbknQ== +"@babel/helper-module-transforms@^7.13.0", "@babel/helper-module-transforms@^7.14.0", "@babel/helper-module-transforms@^7.9.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.14.0.tgz#8fcf78be220156f22633ee204ea81f73f826a8ad" + integrity sha512-L40t9bxIuGOfpIGA3HNkJhU9qYrf4y5A5LUSw7rGMSn+pcG8dfJ0g6Zval6YJGd2nEjI7oP00fRdnhLKndx6bw== dependencies: "@babel/helper-module-imports" "^7.13.12" "@babel/helper-replace-supers" "^7.13.12" "@babel/helper-simple-access" "^7.13.12" "@babel/helper-split-export-declaration" "^7.12.13" - "@babel/helper-validator-identifier" "^7.12.11" + "@babel/helper-validator-identifier" "^7.14.0" "@babel/template" "^7.12.13" - "@babel/traverse" "^7.13.0" - "@babel/types" "^7.13.12" + "@babel/traverse" "^7.14.0" + "@babel/types" "^7.14.0" "@babel/helper-optimise-call-expression@^7.12.13": version "7.12.13" @@ -222,7 +222,7 @@ "@babel/traverse" "^7.13.0" "@babel/types" "^7.13.12" -"@babel/helper-simple-access@^7.12.13", "@babel/helper-simple-access@^7.13.12": +"@babel/helper-simple-access@^7.13.12": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz#dd6c538afb61819d205a012c31792a39c7a5eaf6" integrity 
sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA== @@ -243,10 +243,10 @@ dependencies: "@babel/types" "^7.12.13" -"@babel/helper-validator-identifier@^7.12.11": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz#c9a1f021917dcb5ccf0d4e453e399022981fc9ed" - integrity sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw== +"@babel/helper-validator-identifier@^7.12.11", "@babel/helper-validator-identifier@^7.14.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.0.tgz#d26cad8a47c65286b15df1547319a5d0bcf27288" + integrity sha512-V3ts7zMSu5lfiwWDVWzRDGIN+lnCEUdaXgtVHJgLb1rGaA6jMrtB9EmE7L18foXJIE8Un/A/h6NJfGQp/e1J4A== "@babel/helper-validator-option@^7.12.17": version "7.12.17" @@ -263,28 +263,28 @@ "@babel/traverse" "^7.13.0" "@babel/types" "^7.13.0" -"@babel/helpers@^7.13.10", "@babel/helpers@^7.9.0": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.13.10.tgz#fd8e2ba7488533cdeac45cc158e9ebca5e3c7df8" - integrity sha512-4VO883+MWPDUVRF3PhiLBUFHoX/bsLTGFpFK/HqvvfBZz2D57u9XzPVNFVBTc0PW/CWR9BXTOKt8NF4DInUHcQ== +"@babel/helpers@^7.14.0", "@babel/helpers@^7.9.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.14.0.tgz#ea9b6be9478a13d6f961dbb5f36bf75e2f3b8f62" + integrity sha512-+ufuXprtQ1D1iZTO/K9+EBRn+qPWMJjZSw/S0KlFrxCw4tkrzv9grgpDHkY9MeQTjTY8i2sp7Jep8DfU6tN9Mg== dependencies: "@babel/template" "^7.12.13" - "@babel/traverse" "^7.13.0" - "@babel/types" "^7.13.0" + "@babel/traverse" "^7.14.0" + "@babel/types" "^7.14.0" "@babel/highlight@^7.12.13", "@babel/highlight@^7.8.3": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.13.10.tgz#a8b2a66148f5b27d666b15d81774347a731d52d1" - integrity sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg== + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.14.0.tgz#3197e375711ef6bf834e67d0daec88e4f46113cf" + integrity sha512-YSCOwxvTYEIMSGaBQb5kDDsCopDdiUGsqpatp3fOlI4+2HQSkTmEVWnVuySdAC5EWCqSWWTv0ib63RjR7dTBdg== dependencies: - "@babel/helper-validator-identifier" "^7.12.11" + "@babel/helper-validator-identifier" "^7.14.0" chalk "^2.0.0" js-tokens "^4.0.0" -"@babel/parser@^7.1.0", "@babel/parser@^7.12.13", "@babel/parser@^7.13.0", "@babel/parser@^7.13.10", "@babel/parser@^7.4.3", "@babel/parser@^7.7.0", "@babel/parser@^7.9.0": - version "7.13.12" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.13.12.tgz#ba320059420774394d3b0c0233ba40e4250b81d1" - integrity sha512-4T7Pb244rxH24yR116LAuJ+adxXXnHhZaLJjegJVKSdoNCe4x1eDBaud5YIcQFcqzsaD5BHvJw5BQ0AZapdCRw== +"@babel/parser@^7.1.0", "@babel/parser@^7.12.13", "@babel/parser@^7.14.0", "@babel/parser@^7.4.3", "@babel/parser@^7.7.0", "@babel/parser@^7.9.0": + version "7.14.1" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.14.1.tgz#1bd644b5db3f5797c4479d89ec1817fe02b84c47" + integrity sha512-muUGEKu8E/ftMTPlNp+mc6zL3E9zKWmF5sDHZ5MSsoTP9Wyz64AhEf9kD08xYJ7w6Hdcu8H550ircnPyWSIF0Q== "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.13.12": version "7.13.12" @@ -295,10 +295,10 @@ "@babel/helper-skip-transparent-expression-wrappers" "^7.12.1" "@babel/plugin-proposal-optional-chaining" "^7.13.12" 
-"@babel/plugin-proposal-async-generator-functions@^7.13.8", "@babel/plugin-proposal-async-generator-functions@^7.8.3": - version "7.13.8" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.8.tgz#87aacb574b3bc4b5603f6fe41458d72a5a2ec4b1" - integrity sha512-rPBnhj+WgoSmgq+4gQUtXx/vOcU+UYtjy1AA/aeD61Hwj410fwYyqfUcRP3lR8ucgliVJL/G7sXcNUecC75IXA== +"@babel/plugin-proposal-async-generator-functions@^7.13.15", "@babel/plugin-proposal-async-generator-functions@^7.8.3": + version "7.13.15" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz#80e549df273a3b3050431b148c892491df1bcc5b" + integrity sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA== dependencies: "@babel/helper-plugin-utils" "^7.13.0" "@babel/helper-remap-async-to-generator" "^7.13.0" @@ -320,6 +320,14 @@ "@babel/helper-create-class-features-plugin" "^7.13.0" "@babel/helper-plugin-utils" "^7.13.0" +"@babel/plugin-proposal-class-static-block@^7.13.11": + version "7.13.11" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.13.11.tgz#6fcbba4a962702c17e5371a0c7b39afde186d703" + integrity sha512-fJTdFI4bfnMjvxJyNuaf8i9mVcZ0UhetaGEUHaHV9KEnibLugJkZAtXikR8KcYj+NYmI4DZMS8yQAyg+hvfSqg== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/plugin-syntax-class-static-block" "^7.12.13" + "@babel/plugin-proposal-decorators@7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.8.3.tgz#2156860ab65c5abf068c3f67042184041066543e" @@ -437,6 +445,16 @@ "@babel/helper-create-class-features-plugin" "^7.13.0" "@babel/helper-plugin-utils" "^7.13.0" +"@babel/plugin-proposal-private-property-in-object@^7.14.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.14.0.tgz#b1a1f2030586b9d3489cc26179d2eb5883277636" + integrity sha512-59ANdmEwwRUkLjB7CRtwJxxwtjESw+X2IePItA+RGQh+oy5RmpCh/EvVVvh5XQc3yxsm5gtv0+i9oBZhaDNVTg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.12.13" + "@babel/helper-create-class-features-plugin" "^7.14.0" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/plugin-syntax-private-property-in-object" "^7.14.0" + "@babel/plugin-proposal-unicode-property-regex@^7.12.13", "@babel/plugin-proposal-unicode-property-regex@^7.4.4", "@babel/plugin-proposal-unicode-property-regex@^7.8.3": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz#bebde51339be829c17aaaaced18641deb62b39ba" @@ -459,6 +477,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" +"@babel/plugin-syntax-class-static-block@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.12.13.tgz#8e3d674b0613e67975ceac2776c97b60cafc5c9c" + integrity sha512-ZmKQ0ZXR0nYpHZIIuj9zE7oIqCx2hw9TKi+lIo73NNrMPAZGHfS92/VRV0ZmPj6H2ffBgyFHXvJ5NYsNeEaP2A== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + "@babel/plugin-syntax-decorators@^7.8.3": version "7.12.13" resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz#fac829bf3c7ef4a1bc916257b403e58c6bdaf648" @@ -543,6 +568,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.0" +"@babel/plugin-syntax-private-property-in-object@^7.14.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.0.tgz#762a4babec61176fec6c88480dec40372b140c0b" + integrity sha512-bda3xF8wGl5/5btF794utNOL0Jw+9jE5C1sLZcoK7c4uonE/y3iQiyG+KbkF3WBV/paX58VCpjhxLPkdj5Fe4w== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/plugin-syntax-top-level-await@^7.12.13", "@babel/plugin-syntax-top-level-await@^7.8.3": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz#c5f0fa6e249f5b739727f923540cf7a806130178" @@ -580,12 +612,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" -"@babel/plugin-transform-block-scoping@^7.12.13", "@babel/plugin-transform-block-scoping@^7.8.3": - version "7.12.13" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.13.tgz#f36e55076d06f41dfd78557ea039c1b581642e61" - integrity sha512-Pxwe0iqWJX4fOOM2kEZeUuAxHMWb9nK+9oh5d11bsLoB0xMg+mkDpt0eYuDZB7ETrY9bbcVlKUGTOGWy7BHsMQ== +"@babel/plugin-transform-block-scoping@^7.14.1", "@babel/plugin-transform-block-scoping@^7.8.3": + version "7.14.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.14.1.tgz#ac1b3a8e3d8cbb31efc6b9be2f74eb9823b74ab2" + integrity sha512-2mQXd0zBrwfp0O1moWIhPpEeTKDvxyHcnma3JATVP1l+CctWBuot6OJG8LQ4DnBj4ZZPSmlb/fm4mu47EOAnVA== dependencies: - "@babel/helper-plugin-utils" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" "@babel/plugin-transform-classes@^7.13.0", "@babel/plugin-transform-classes@^7.9.0": version "7.13.0" @@ -607,10 +639,10 @@ dependencies: "@babel/helper-plugin-utils" "^7.13.0" -"@babel/plugin-transform-destructuring@^7.13.0", "@babel/plugin-transform-destructuring@^7.8.3": - version "7.13.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.0.tgz#c5dce270014d4e1ebb1d806116694c12b7028963" - integrity sha512-zym5em7tePoNT9s964c0/KU3JPPnuq7VhIxPRefJ4/s82cD+q1mgKfuGRDMCPL0HTyKz4dISuQlCusfgCJ86HA== +"@babel/plugin-transform-destructuring@^7.13.17", "@babel/plugin-transform-destructuring@^7.8.3": + version "7.13.17" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.17.tgz#678d96576638c19d5b36b332504d3fd6e06dea27" + integrity sha512-UAUqiLv+uRLO+xuBKKMEpC+t7YRNVRqBsWWq1yKXbBZBje/t3IXCiSinZhjn/DC3qzBfICeYd2EFGEbHsh5RLA== dependencies: "@babel/helper-plugin-utils" "^7.13.0" @@ -674,23 +706,23 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" -"@babel/plugin-transform-modules-amd@^7.13.0", "@babel/plugin-transform-modules-amd@^7.9.0": - version "7.13.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz#19f511d60e3d8753cc5a6d4e775d3a5184866cc3" - integrity sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ== +"@babel/plugin-transform-modules-amd@^7.14.0", "@babel/plugin-transform-modules-amd@^7.9.0": + version "7.14.0" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.14.0.tgz#589494b5b290ff76cf7f59c798011f6d77026553" + integrity sha512-CF4c5LX4LQ03LebQxJ5JZes2OYjzBuk1TdiF7cG7d5dK4lAdw9NZmaxq5K/mouUdNeqwz3TNjnW6v01UqUNgpQ== dependencies: - "@babel/helper-module-transforms" "^7.13.0" + "@babel/helper-module-transforms" "^7.14.0" "@babel/helper-plugin-utils" "^7.13.0" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-commonjs@^7.13.8", "@babel/plugin-transform-modules-commonjs@^7.9.0": - version "7.13.8" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz#7b01ad7c2dcf2275b06fa1781e00d13d420b3e1b" - integrity sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw== +"@babel/plugin-transform-modules-commonjs@^7.14.0", "@babel/plugin-transform-modules-commonjs@^7.9.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.14.0.tgz#52bc199cb581e0992edba0f0f80356467587f161" + integrity sha512-EX4QePlsTaRZQmw9BsoPeyh5OCtRGIhwfLquhxGp5e32w+dyL8htOcDwamlitmNFK6xBZYlygjdye9dbd9rUlQ== dependencies: - "@babel/helper-module-transforms" "^7.13.0" + "@babel/helper-module-transforms" "^7.14.0" "@babel/helper-plugin-utils" "^7.13.0" - "@babel/helper-simple-access" "^7.12.13" + "@babel/helper-simple-access" "^7.13.12" babel-plugin-dynamic-import-node "^2.3.3" "@babel/plugin-transform-modules-systemjs@^7.13.8", "@babel/plugin-transform-modules-systemjs@^7.9.0": @@ -704,12 +736,12 @@ "@babel/helper-validator-identifier" "^7.12.11" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-umd@^7.13.0", "@babel/plugin-transform-modules-umd@^7.9.0": - version "7.13.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz#8a3d96a97d199705b9fd021580082af81c06e70b" - integrity sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw== +"@babel/plugin-transform-modules-umd@^7.14.0", "@babel/plugin-transform-modules-umd@^7.9.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.14.0.tgz#2f8179d1bbc9263665ce4a65f305526b2ea8ac34" + integrity sha512-nPZdnWtXXeY7I87UZr9VlsWme3Y0cfFFE41Wbxz4bbaexAjNMInXPFUpRRUJ8NoMm0Cw+zxbqjdPmLhcjfazMw== dependencies: - "@babel/helper-module-transforms" "^7.13.0" + "@babel/helper-module-transforms" "^7.14.0" "@babel/helper-plugin-utils" "^7.13.0" "@babel/plugin-transform-named-capturing-groups-regex@^7.12.13", "@babel/plugin-transform-named-capturing-groups-regex@^7.8.3": @@ -749,9 +781,9 @@ "@babel/helper-plugin-utils" "^7.12.13" "@babel/plugin-transform-react-constant-elements@^7.0.0": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.13.10.tgz#5d3de8a8ee53f4612e728f4f17b8c9125f8019e5" - integrity sha512-E+aCW9j7mLq01tOuGV08YzLBt+vSyr4bOPT75B6WrAlrUfmOYOZ/yWk847EH0dv0xXiCihWLEmlX//O30YhpIw== + version "7.13.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.13.13.tgz#0208b1d942bf939cd4f7aa5b255d42602aa4a920" + integrity sha512-SNJU53VM/SjQL0bZhyU+f4kJQz7bQQajnrZRSaU21hruG/NWY41AEM9AWXeXX90pYr/C2yAmTgI6yW3LlLrAUQ== dependencies: 
"@babel/helper-plugin-utils" "^7.13.0" @@ -769,7 +801,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" -"@babel/plugin-transform-react-jsx-development@^7.12.12", "@babel/plugin-transform-react-jsx-development@^7.9.0": +"@babel/plugin-transform-react-jsx-development@^7.12.17", "@babel/plugin-transform-react-jsx-development@^7.9.0": version "7.12.17" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.12.17.tgz#f510c0fa7cd7234153539f9a362ced41a5ca1447" integrity sha512-BPjYV86SVuOaudFhsJR1zjgxxOhJDt6JHNoD48DxWEIxUCAMjV1ys6DYw4SDYZh0b1QsS2vfIA9t/ZsQGsDOUQ== @@ -790,7 +822,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.12.13" -"@babel/plugin-transform-react-jsx@^7.12.13", "@babel/plugin-transform-react-jsx@^7.12.17", "@babel/plugin-transform-react-jsx@^7.9.1": +"@babel/plugin-transform-react-jsx@^7.12.17", "@babel/plugin-transform-react-jsx@^7.13.12", "@babel/plugin-transform-react-jsx@^7.9.1": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.13.12.tgz#1df5dfaf0f4b784b43e96da6f28d630e775f68b3" integrity sha512-jcEI2UqIcpCqB5U5DRxIl0tQEProI2gcu+g8VTIqxLO5Iidojb4d77q+fwGseCvd8af/lJ9masp4QWzBXFE2xA== @@ -809,10 +841,10 @@ "@babel/helper-annotate-as-pure" "^7.10.4" "@babel/helper-plugin-utils" "^7.10.4" -"@babel/plugin-transform-regenerator@^7.12.13", "@babel/plugin-transform-regenerator@^7.8.7": - version "7.12.13" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.12.13.tgz#b628bcc9c85260ac1aeb05b45bde25210194a2f5" - integrity sha512-lxb2ZAvSLyJ2PEe47hoGWPmW22v7CtSl9jW8mingV4H2sEX/JOcrAj2nPuGWi56ERUm2bUpjKzONAuT6HCn2EA== +"@babel/plugin-transform-regenerator@^7.13.15", "@babel/plugin-transform-regenerator@^7.8.7": + version "7.13.15" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz#e5eb28945bf8b6563e7f818945f966a8d2997f39" + integrity sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ== dependencies: regenerator-transform "^0.14.2" @@ -960,17 +992,18 @@ semver "^5.5.0" "@babel/preset-env@^7.4.5": - version "7.13.12" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.13.12.tgz#6dff470478290582ac282fb77780eadf32480237" - integrity sha512-JzElc6jk3Ko6zuZgBtjOd01pf9yYDEIH8BcqVuYIuOkzOwDesoa/Nz4gIo4lBG6K861KTV9TvIgmFuT6ytOaAA== + version "7.14.1" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.14.1.tgz#b55914e2e68885ea03f69600b2d3537e54574a93" + integrity sha512-0M4yL1l7V4l+j/UHvxcdvNfLB9pPtIooHTbEhgD/6UGyh8Hy3Bm1Mj0buzjDXATCSz3JFibVdnoJZCrlUCanrQ== dependencies: - "@babel/compat-data" "^7.13.12" - "@babel/helper-compilation-targets" "^7.13.10" + "@babel/compat-data" "^7.14.0" + "@babel/helper-compilation-targets" "^7.13.16" "@babel/helper-plugin-utils" "^7.13.0" "@babel/helper-validator-option" "^7.12.17" "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.13.12" - "@babel/plugin-proposal-async-generator-functions" "^7.13.8" + "@babel/plugin-proposal-async-generator-functions" "^7.13.15" "@babel/plugin-proposal-class-properties" "^7.13.0" + "@babel/plugin-proposal-class-static-block" "^7.13.11" "@babel/plugin-proposal-dynamic-import" "^7.13.8" "@babel/plugin-proposal-export-namespace-from" "^7.12.13" "@babel/plugin-proposal-json-strings" "^7.13.8" @@ -981,9 +1014,11 @@ 
"@babel/plugin-proposal-optional-catch-binding" "^7.13.8" "@babel/plugin-proposal-optional-chaining" "^7.13.12" "@babel/plugin-proposal-private-methods" "^7.13.0" + "@babel/plugin-proposal-private-property-in-object" "^7.14.0" "@babel/plugin-proposal-unicode-property-regex" "^7.12.13" "@babel/plugin-syntax-async-generators" "^7.8.4" "@babel/plugin-syntax-class-properties" "^7.12.13" + "@babel/plugin-syntax-class-static-block" "^7.12.13" "@babel/plugin-syntax-dynamic-import" "^7.8.3" "@babel/plugin-syntax-export-namespace-from" "^7.8.3" "@babel/plugin-syntax-json-strings" "^7.8.3" @@ -993,14 +1028,15 @@ "@babel/plugin-syntax-object-rest-spread" "^7.8.3" "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" "@babel/plugin-syntax-optional-chaining" "^7.8.3" + "@babel/plugin-syntax-private-property-in-object" "^7.14.0" "@babel/plugin-syntax-top-level-await" "^7.12.13" "@babel/plugin-transform-arrow-functions" "^7.13.0" "@babel/plugin-transform-async-to-generator" "^7.13.0" "@babel/plugin-transform-block-scoped-functions" "^7.12.13" - "@babel/plugin-transform-block-scoping" "^7.12.13" + "@babel/plugin-transform-block-scoping" "^7.14.1" "@babel/plugin-transform-classes" "^7.13.0" "@babel/plugin-transform-computed-properties" "^7.13.0" - "@babel/plugin-transform-destructuring" "^7.13.0" + "@babel/plugin-transform-destructuring" "^7.13.17" "@babel/plugin-transform-dotall-regex" "^7.12.13" "@babel/plugin-transform-duplicate-keys" "^7.12.13" "@babel/plugin-transform-exponentiation-operator" "^7.12.13" @@ -1008,16 +1044,16 @@ "@babel/plugin-transform-function-name" "^7.12.13" "@babel/plugin-transform-literals" "^7.12.13" "@babel/plugin-transform-member-expression-literals" "^7.12.13" - "@babel/plugin-transform-modules-amd" "^7.13.0" - "@babel/plugin-transform-modules-commonjs" "^7.13.8" + "@babel/plugin-transform-modules-amd" "^7.14.0" + "@babel/plugin-transform-modules-commonjs" "^7.14.0" "@babel/plugin-transform-modules-systemjs" "^7.13.8" - "@babel/plugin-transform-modules-umd" "^7.13.0" + "@babel/plugin-transform-modules-umd" "^7.14.0" "@babel/plugin-transform-named-capturing-groups-regex" "^7.12.13" "@babel/plugin-transform-new-target" "^7.12.13" "@babel/plugin-transform-object-super" "^7.12.13" "@babel/plugin-transform-parameters" "^7.13.0" "@babel/plugin-transform-property-literals" "^7.12.13" - "@babel/plugin-transform-regenerator" "^7.12.13" + "@babel/plugin-transform-regenerator" "^7.13.15" "@babel/plugin-transform-reserved-words" "^7.12.13" "@babel/plugin-transform-shorthand-properties" "^7.12.13" "@babel/plugin-transform-spread" "^7.13.0" @@ -1027,10 +1063,10 @@ "@babel/plugin-transform-unicode-escapes" "^7.12.13" "@babel/plugin-transform-unicode-regex" "^7.12.13" "@babel/preset-modules" "^0.1.4" - "@babel/types" "^7.13.12" - babel-plugin-polyfill-corejs2 "^0.1.4" - babel-plugin-polyfill-corejs3 "^0.1.3" - babel-plugin-polyfill-regenerator "^0.1.2" + "@babel/types" "^7.14.1" + babel-plugin-polyfill-corejs2 "^0.2.0" + babel-plugin-polyfill-corejs3 "^0.2.0" + babel-plugin-polyfill-regenerator "^0.2.0" core-js-compat "^3.9.0" semver "^6.3.0" @@ -1058,14 +1094,15 @@ "@babel/plugin-transform-react-jsx-source" "^7.9.0" "@babel/preset-react@^7.0.0": - version "7.12.13" - resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.12.13.tgz#5f911b2eb24277fa686820d5bd81cad9a0602a0a" - integrity sha512-TYM0V9z6Abb6dj1K7i5NrEhA13oS5ujUYQYDfqIBXYHOc2c2VkFgc+q9kyssIyUfy4/hEwqrgSlJ/Qgv8zJLsA== + version "7.13.13" + resolved 
"https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.13.13.tgz#fa6895a96c50763fe693f9148568458d5a839761" + integrity sha512-gx+tDLIE06sRjKJkVtpZ/t3mzCDOnPG+ggHZG9lffUbX8+wC739x20YQc9V35Do6ZAxaUc/HhVHIiOzz5MvDmA== dependencies: - "@babel/helper-plugin-utils" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/helper-validator-option" "^7.12.17" "@babel/plugin-transform-react-display-name" "^7.12.13" - "@babel/plugin-transform-react-jsx" "^7.12.13" - "@babel/plugin-transform-react-jsx-development" "^7.12.12" + "@babel/plugin-transform-react-jsx" "^7.13.12" + "@babel/plugin-transform-react-jsx-development" "^7.12.17" "@babel/plugin-transform-react-pure-annotations" "^7.12.1" "@babel/preset-typescript@7.9.0": @@ -1077,9 +1114,9 @@ "@babel/plugin-transform-typescript" "^7.9.0" "@babel/runtime-corejs3@^7.10.2", "@babel/runtime-corejs3@^7.12.1": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.13.10.tgz#14c3f4c85de22ba88e8e86685d13e8861a82fe86" - integrity sha512-x/XYVQ1h684pp1mJwOV4CyvqZXqbc8CMsMGUnAbuc82ZCdv1U63w5RSUzgDSXQHG5Rps/kiksH6g2D5BuaKyXg== + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.14.0.tgz#6bf5fbc0b961f8e3202888cb2cd0fb7a0a9a3f66" + integrity sha512-0R0HTZWHLk6G8jIk0FtoX+AatCtKnswS98VhXwGImFc759PJRp4Tru0PQYZofyijTFUr+gT8Mu7sgXVJLQ0ceg== dependencies: core-js-pure "^3.0.0" regenerator-runtime "^0.13.4" @@ -1092,9 +1129,9 @@ regenerator-runtime "^0.13.4" "@babel/runtime@^7.0.0", "@babel/runtime@^7.1.2", "@babel/runtime@^7.10.2", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.3.4", "@babel/runtime@^7.4.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4": - version "7.13.10" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.13.10.tgz#47d42a57b6095f4468da440388fdbad8bebf0d7d" - integrity sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw== + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.14.0.tgz#46794bc20b612c5f75e62dd071e24dfd95f1cbe6" + integrity sha512-JELkvo/DlpNdJ7dlyw/eY7E0suy5i5GQH+Vlxaq1nsNJ+H7f4Vtv3jMeCEgRhZZQFXTjldYfQgv2qmM6M1v5wA== dependencies: regenerator-runtime "^0.13.4" @@ -1107,28 +1144,26 @@ "@babel/parser" "^7.12.13" "@babel/types" "^7.12.13" -"@babel/traverse@^7.1.0", "@babel/traverse@^7.13.0", "@babel/traverse@^7.4.3", "@babel/traverse@^7.7.0", "@babel/traverse@^7.9.0": - version "7.13.0" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.13.0.tgz#6d95752475f86ee7ded06536de309a65fc8966cc" - integrity sha512-xys5xi5JEhzC3RzEmSGrs/b3pJW/o87SypZ+G/PhaE7uqVQNv/jlmVIBXuoh5atqQ434LfXV+sf23Oxj0bchJQ== +"@babel/traverse@^7.1.0", "@babel/traverse@^7.13.0", "@babel/traverse@^7.13.15", "@babel/traverse@^7.14.0", "@babel/traverse@^7.4.3", "@babel/traverse@^7.7.0", "@babel/traverse@^7.9.0": + version "7.14.0" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.14.0.tgz#cea0dc8ae7e2b1dec65f512f39f3483e8cc95aef" + integrity sha512-dZ/a371EE5XNhTHomvtuLTUyx6UEoJmYX+DT5zBCQN3McHemsuIaKKYqsc/fs26BEkHs/lBZy0J571LP5z9kQA== dependencies: "@babel/code-frame" "^7.12.13" - "@babel/generator" "^7.13.0" + "@babel/generator" "^7.14.0" "@babel/helper-function-name" "^7.12.13" "@babel/helper-split-export-declaration" "^7.12.13" - "@babel/parser" "^7.13.0" - "@babel/types" "^7.13.0" + "@babel/parser" "^7.14.0" + "@babel/types" "^7.14.0" debug "^4.1.0" globals "^11.1.0" - lodash "^4.17.19" 
-"@babel/types@^7.0.0", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.13.0", "@babel/types@^7.13.12", "@babel/types@^7.3.0", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.7.0", "@babel/types@^7.9.0": - version "7.13.12" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.13.12.tgz#edbf99208ef48852acdff1c8a681a1e4ade580cd" - integrity sha512-K4nY2xFN4QMvQwkQ+zmBDp6ANMbVNw6BbxWmYA4qNjhR9W+Lj/8ky5MEY2Me5r+B2c6/v6F53oMndG+f9s3IiA== +"@babel/types@^7.0.0", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.13.0", "@babel/types@^7.13.12", "@babel/types@^7.13.16", "@babel/types@^7.14.0", "@babel/types@^7.14.1", "@babel/types@^7.3.0", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.7.0", "@babel/types@^7.9.0": + version "7.14.1" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.14.1.tgz#095bd12f1c08ab63eff6e8f7745fa7c9cc15a9db" + integrity sha512-S13Qe85fzLs3gYRUnrpyeIrBJIMYv33qSTg1qoBwiG6nPKwUWAD9odSzWhEedpwOIzSEI6gbdQIWEMiCI42iBA== dependencies: - "@babel/helper-validator-identifier" "^7.12.11" - lodash "^4.17.19" + "@babel/helper-validator-identifier" "^7.14.0" to-fast-properties "^2.0.0" "@cnakazawa/watch@^1.0.3": @@ -1140,9 +1175,9 @@ minimist "^1.2.0" "@codemirror/autocomplete@^0.18.3": - version "0.18.3" - resolved "https://registry.yarnpkg.com/@codemirror/autocomplete/-/autocomplete-0.18.3.tgz#6c75904c1156e4d9a00e56b9a3e559dda6149e1e" - integrity sha512-XAilIpYxsessr3Zh39nc5T97Zz9wLMwQTCDlIKapm/VK3JnX1I1jkoe8JqpbyVyabVxGXpB2K88GIVS9X+nLZQ== + version "0.18.5" + resolved "https://registry.yarnpkg.com/@codemirror/autocomplete/-/autocomplete-0.18.5.tgz#5c25ddbef858503920fa4912b48bf78be93ee462" + integrity sha512-Zp/HMTwvaP4B01HQx+5Ga0xBBLAwNaAlbALip355NPRBkYX9PZheX5b/F5IUAdI6kIrF2eYz1xAOChXgYg9maw== dependencies: "@codemirror/language" "^0.18.0" "@codemirror/state" "^0.18.0" @@ -1163,9 +1198,9 @@ "@codemirror/view" "^0.18.0" "@codemirror/commands@^0.18.0": - version "0.18.0" - resolved "https://registry.yarnpkg.com/@codemirror/commands/-/commands-0.18.0.tgz#10344d7d7a0fecad826e9853c1e6069d706298c6" - integrity sha512-4H63B4oqr8dmJ3VOKvMI7xrZIBJjAdtz3Z0V/Jt0HlIYTe76Omy4h9BS3b9EgyNaWjIO/Slz3KQPwihcC8uR5Q== + version "0.18.2" + resolved "https://registry.yarnpkg.com/@codemirror/commands/-/commands-0.18.2.tgz#a90067b1e3127ffe2c1be2daa68c0f4aeda59308" + integrity sha512-NeIpsQe5yUIhG7sD1HPaw/9TO5V7miMKwGwhT/0tkgkmgnMtJcgnguM1gjaUlaZ09BBJO6s61D8JHNDUvBI6tA== dependencies: "@codemirror/language" "^0.18.0" "@codemirror/matchbrackets" "^0.18.0" @@ -1204,9 +1239,9 @@ "@codemirror/view" "^0.18.0" "@codemirror/language@^0.18.0": - version "0.18.0" - resolved "https://registry.yarnpkg.com/@codemirror/language/-/language-0.18.0.tgz#16c3beaf372d0ecfcb76d708a8f55efccaa25563" - integrity sha512-gryu0Sej1vG3S3njwsJ+bhz9zLoJxZ2TahLlxpqOB3uqVGZrGDyE+GmZBnA6I3hwHvaO1O4WXKScVsKoW6HqFA== + version "0.18.1" + resolved "https://registry.yarnpkg.com/@codemirror/language/-/language-0.18.1.tgz#23682324228606c4ae5b6a9f7cd0a4b9fdff83dd" + integrity sha512-j/TWV8sNmzU79kk/hPLb9NqSPoH59850kQSpgY11LxOEBlKVRKgaWabgNtUCSeVCAnfisGekupk3aq2ftILqug== dependencies: "@codemirror/state" "^0.18.0" "@codemirror/text" "^0.18.0" @@ -1215,9 +1250,9 @@ lezer-tree "^0.13.0" "@codemirror/lint@^0.18.1": - version "0.18.1" - resolved "https://registry.yarnpkg.com/@codemirror/lint/-/lint-0.18.1.tgz#ef5502d3bc27eaf23c670fa888bd23d09b59af55" - integrity sha512-2Ueg/ojU56vF5thxMdS67XQzvHNcBnPKw2zgbDVmL/Z+84SMjP7EKvHV5FlbrKFNGZiwnaAKk5MZRYUwBY3f0g== + 
version "0.18.2" + resolved "https://registry.yarnpkg.com/@codemirror/lint/-/lint-0.18.2.tgz#d80adb1767b486894e921785b5e82fa435d28ecf" + integrity sha512-WUZdlWNqwF6OIJFD/n3Pfsi1R3eIYrAmnnFhfEBUI/ftBh8RqjqOoPJvctHm++re85cwWzXSSZgSWxzse11PSA== dependencies: "@codemirror/panel" "^0.18.1" "@codemirror/state" "^0.18.0" @@ -1244,9 +1279,9 @@ "@codemirror/view" "^0.18.0" "@codemirror/rangeset@^0.18.0": - version "0.18.0" - resolved "https://registry.yarnpkg.com/@codemirror/rangeset/-/rangeset-0.18.0.tgz#8b3bec00c1cee8c3db3827a752a76819ead2dfab" - integrity sha512-+dpK3T6EFv2vkoAc3sTZld0N5SHZDjD3YowFYuMWn0Xw3t+u6k+JZlGBuaFTXdsLeF0auclsm0XhRUpMVuXhTg== + version "0.18.1" + resolved "https://registry.yarnpkg.com/@codemirror/rangeset/-/rangeset-0.18.1.tgz#0e51e1117fa5aae0134073735a07c659f334a699" + integrity sha512-Q+t92KlvDDD9oNXOvYHNL3kEUF1Y595OjSsl5GNZYI2JPY8IW9PoH1z37sgqgxMMuWQrDNp6AOrnd2j/7uBhFA== dependencies: "@codemirror/state" "^0.18.0" @@ -1263,9 +1298,9 @@ crelt "^1.0.5" "@codemirror/state@^0.18.0", "@codemirror/state@^0.18.2", "@codemirror/state@^0.18.3": - version "0.18.3" - resolved "https://registry.yarnpkg.com/@codemirror/state/-/state-0.18.3.tgz#f275293b077d6c3867c0343320d6b29c10e54f84" - integrity sha512-LQlQWFAw+OLd1ufdf8o8yU/iJd88ixw8kOQ8KgEHDZJnegJOz0NafXvkFoWPiLNaiIUxif02KlZXJAPqDThZ4g== + version "0.18.7" + resolved "https://registry.yarnpkg.com/@codemirror/state/-/state-0.18.7.tgz#3339a732387bb2c034987c57ccf0649ef2f7c4c1" + integrity sha512-cVyTiAC9vv90NKmGOfNtBjyIem3BqKui1L5Hfcxurp8K9votQj2oH9COcgWPnQ2Xs64yC70tEuTt9DF1pj5PFQ== dependencies: "@codemirror/text" "^0.18.0" @@ -1283,9 +1318,9 @@ "@codemirror/view" "^0.18.0" "@codemirror/view@^0.18.0", "@codemirror/view@^0.18.3": - version "0.18.3" - resolved "https://registry.yarnpkg.com/@codemirror/view/-/view-0.18.3.tgz#31ffcd0a073124b95feac47d2a3a03bfb3546fca" - integrity sha512-9scPYgDoUFRjDKjClCIxPBMZuoiATn01gKGm/OqSODUcsWQ37LS9qs/gJNdrIn8gQNlzI9wNRyBck7ycZo4Rng== + version "0.18.11" + resolved "https://registry.yarnpkg.com/@codemirror/view/-/view-0.18.11.tgz#3a9655758f0743cc57d718723b57ea49a72cdfa7" + integrity sha512-hNWTEGTpfFk1tjUnq4VjREe77rQQiS2nnhK9CDvU2M44g7wtkCFLB8ymvNp+Swo21hfUix33o2MKBeb830961g== dependencies: "@codemirror/rangeset" "^0.18.0" "@codemirror/state" "^0.18.0" @@ -1561,9 +1596,9 @@ react-lifecycles-compat "^3.0.4" "@sinonjs/commons@^1.6.0", "@sinonjs/commons@^1.7.0", "@sinonjs/commons@^1.8.1": - version "1.8.2" - resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.2.tgz#858f5c4b48d80778fde4b9d541f27edc0d56488b" - integrity sha512-sruwd86RJHdsVf/AtBoijDmUqJp3B6hF/DGC23C+JaegnDHaZyewCjoVGTdg3J0uz3Zs7NnIT05OBOmML72lQw== + version "1.8.3" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.3.tgz#3802ddd21a50a949b6721ddd72da36e67e7f1b2d" + integrity sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ== dependencies: type-detect "4.0.8" @@ -1802,9 +1837,9 @@ "@types/istanbul-lib-report" "*" "@types/jest@^26.0.10": - version "26.0.21" - resolved "https://registry.yarnpkg.com/@types/jest/-/jest-26.0.21.tgz#3a73c2731e7e4f0fbaea56ce7ff8c79cf812bd24" - integrity sha512-ab9TyM/69yg7eew9eOwKMUmvIZAKEGZYlq/dhe5/0IMUd/QLJv5ldRMdddSn+u22N13FP3s5jYyktxuBwY0kDA== + version "26.0.23" + resolved "https://registry.yarnpkg.com/@types/jest/-/jest-26.0.23.tgz#a1b7eab3c503b80451d019efb588ec63522ee4e7" + integrity sha512-ZHLmWMJ9jJ9PTiT58juykZpL7KjwJywFN3Rr2pTSkyQfydf/rk22yS7W8p5DaVUMQ2BQC7oYiU3FjbTM/mYrOA== dependencies: jest-diff "^26.0.0" 
pretty-format "^26.0.0" @@ -1827,9 +1862,9 @@ integrity sha1-7ihweulOEdK4J7y+UnC86n8+ce4= "@types/minimatch@*": - version "3.0.3" - resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" - integrity sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA== + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.4.tgz#f0ec25dbf2f0e4b18647313ac031134ca5b24b21" + integrity sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA== "@types/moment-timezone@^0.5.10": version "0.5.30" @@ -1839,14 +1874,14 @@ moment-timezone "*" "@types/node@*": - version "14.14.35" - resolved "https://registry.yarnpkg.com/@types/node/-/node-14.14.35.tgz#42c953a4e2b18ab931f72477e7012172f4ffa313" - integrity sha512-Lt+wj8NVPx0zUmUwumiVXapmaLUcAk3yPuHCFVXras9k5VT9TdhJqKqGVUQCD60OTMCl0qxJ57OiTL0Mic3Iag== + version "15.0.2" + resolved "https://registry.yarnpkg.com/@types/node/-/node-15.0.2.tgz#51e9c0920d1b45936ea04341aa3e2e58d339fb67" + integrity sha512-p68+a+KoxpoB47015IeYZYRrdqMUcpbK8re/zpFB8Ld46LHC1lPEbp3EXgkEhAYEcPvjJF6ZO+869SQ0aH1dcA== "@types/node@^12.11.1": - version "12.20.6" - resolved "https://registry.yarnpkg.com/@types/node/-/node-12.20.6.tgz#7b73cce37352936e628c5ba40326193443cfba25" - integrity sha512-sRVq8d+ApGslmkE9e3i+D3gFGk7aZHAT+G4cIpIEdLJYPsWiSPwcAnJEjddLQQDqV3Ra2jOclX/Sv6YrvGYiWA== + version "12.20.12" + resolved "https://registry.yarnpkg.com/@types/node/-/node-12.20.12.tgz#fd9c1c2cfab536a2383ed1ef70f94adea743a226" + integrity sha512-KQZ1al2hKOONAs2MFv+yTQP1LkDWMrRJ9YCVRalXltOfXsBmH5IownLxQaiq0lnAHwAViLnh2aTYqrPcRGEbgg== "@types/parse-json@^4.0.0": version "4.0.0" @@ -1899,18 +1934,18 @@ "@types/react" "*" "@types/react@*": - version "17.0.3" - resolved "https://registry.yarnpkg.com/@types/react/-/react-17.0.3.tgz#ba6e215368501ac3826951eef2904574c262cc79" - integrity sha512-wYOUxIgs2HZZ0ACNiIayItyluADNbONl7kt8lkLjVK8IitMH5QMyAh75Fwhmo37r1m7L2JaFj03sIfxBVDvRAg== + version "17.0.5" + resolved "https://registry.yarnpkg.com/@types/react/-/react-17.0.5.tgz#3d887570c4489011f75a3fc8f965bf87d09a1bea" + integrity sha512-bj4biDB9ZJmGAYTWSKJly6bMr4BLUiBrx9ujiJEoP9XIDY9CTaPGxE5QWN/1WjpPLzYF7/jRNnV2nNxNe970sw== dependencies: "@types/prop-types" "*" "@types/scheduler" "*" csstype "^3.0.2" "@types/react@^16", "@types/react@^16.8.2": - version "16.14.5" - resolved "https://registry.yarnpkg.com/@types/react/-/react-16.14.5.tgz#2c39b5cadefaf4829818f9219e5e093325979f4d" - integrity sha512-YRRv9DNZhaVTVRh9Wmmit7Y0UFhEVqXqCSw3uazRWMxa2x85hWQZ5BN24i7GXZbaclaLXEcodEeIHsjBA8eAMw== + version "16.14.6" + resolved "https://registry.yarnpkg.com/@types/react/-/react-16.14.6.tgz#d933a2a6bc1bfe320a5eea480e8f45ba8126d6ee" + integrity sha512-Ol/aFKune+P0FSFKIgf+XbhGzYGyz0p7g5befSt4rmbzfGLaZR0q7jPew9k7d3bvrcuaL8dPy9Oz3XGZmf9n+w== dependencies: "@types/prop-types" "*" "@types/scheduler" "*" @@ -1924,9 +1959,9 @@ reactstrap "*" "@types/sanitize-html@^1.20.2": - version "1.27.1" - resolved "https://registry.yarnpkg.com/@types/sanitize-html/-/sanitize-html-1.27.1.tgz#1fc4b67edd6296eeb366960d13cd01f5d6bffdcd" - integrity sha512-TW5gfZYplKQYO8003WrxaDgwyJsEG74920S+Ei7zB9mbUFgm7l2NvFAumXzxL+1fOwM2I9A+G/1rgiEebQOxcQ== + version "1.27.2" + resolved "https://registry.yarnpkg.com/@types/sanitize-html/-/sanitize-html-1.27.2.tgz#f7bf16ca4b1408278f97ae737f0377a08a10b35c" + integrity 
sha512-DrH26m7CV6PB4YVckjbSIx+xloB7HBolr9Ctm0gZBffSu5dDV4yJKFQGPquJlReVW+xmg59gx+b/8/qYHxZEuw== dependencies: htmlparser2 "^4.1.0" @@ -1948,9 +1983,9 @@ integrity sha512-dIPoZ3g5gcx9zZEszaxLSVTvMReD3xxyyDnQUjA6IYDG9Ba2AV0otMPs+77sG9ojB4Qr2N2Vk5RnKeuA0X/0bg== "@types/sizzle@*": - version "2.3.2" - resolved "https://registry.yarnpkg.com/@types/sizzle/-/sizzle-2.3.2.tgz#a811b8c18e2babab7d542b3365887ae2e4d9de47" - integrity sha512-7EJYyKTL7tFR8+gDbB6Wwz/arpGa0Mywk1TJbNzKzHtzbwVmY4HR9WqS5VV7dsBUKQmPNr192jHr/VpBluj/hg== + version "2.3.3" + resolved "https://registry.yarnpkg.com/@types/sizzle/-/sizzle-2.3.3.tgz#ff5e2f1902969d305225a047c8a0fd5c915cebef" + integrity sha512-JYM8x9EGF163bEyhdJBpR2QX1R5naCJHC8ucJylJ3w9/CVBaskdQ8WqBf8MmQrd1kRvp/a4TS8HJ+bxzR7ZJYQ== "@types/stack-utils@^1.0.1": version "1.0.1" @@ -2241,10 +2276,10 @@ acorn@^7.1.1: resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== -acorn@^8.0.5: - version "8.1.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.1.0.tgz#52311fd7037ae119cbb134309e901aa46295b3fe" - integrity sha512-LWCF/Wn0nfHOmJ9rzQApGnxnvgfROzGilS8936rqN/lfcYkY9MYZzdMqN+2NJ4SlTc+m5HiSa+kNfDtI64dwUA== +acorn@^8.1.0: + version "8.2.4" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.2.4.tgz#caba24b08185c3b56e3168e97d15ed17f4d31fd0" + integrity sha512-Ibt84YwBDDA890eDiDCEqcbwvHlBvzzDkU2cGBBDDI1QWT12jTiXIOn2CIw5KK4i6N5Z2HUxwYjzriDyqaqqZg== address@1.1.2, address@^1.0.1: version "1.1.2" @@ -2318,11 +2353,11 @@ ansi-escapes@^3.0.0: integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ== ansi-escapes@^4.2.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.1.tgz#a5c47cc43181f1f38ffd7076837700d395522a61" - integrity sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA== + version "4.3.2" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" + integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== dependencies: - type-fest "^0.11.0" + type-fest "^0.21.3" ansi-html@0.0.7: version "0.0.7" @@ -2377,9 +2412,9 @@ anymatch@^2.0.0: normalize-path "^2.1.1" anymatch@~3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.1.tgz#c55ecf02185e2469259399310c173ce31233b142" - integrity sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg== + version "3.1.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" + integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg== dependencies: normalize-path "^3.0.0" picomatch "^2.0.4" @@ -2613,9 +2648,9 @@ aws4@^1.8.0: integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== axe-core@^4.0.2: - version "4.1.3" - resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.1.3.tgz#64a4c85509e0991f5168340edc4bedd1ceea6966" - integrity sha512-vwPpH4Aj4122EW38mxO/fxhGKtwWTMLDIJfZ1He0Edbtjcfna/R3YB67yVhezUMzqc3Jr3+Ii50KRntlENL4xQ== + version "4.2.0" + resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.2.0.tgz#6594db4ee62f78be79e32a7295d21b099b60668d" + integrity 
sha512-1uIESzroqpaTzt9uX48HO+6gfnKu3RwvWdCcWSrX4csMInJfCo1yvKPNXCwXFRpJqRW25tiASb6No0YH57PXqg== axobject-query@^2.0.2, axobject-query@^2.2.0: version "2.2.0" @@ -2712,29 +2747,29 @@ babel-plugin-named-asset-import@^0.3.6: resolved "https://registry.yarnpkg.com/babel-plugin-named-asset-import/-/babel-plugin-named-asset-import-0.3.7.tgz#156cd55d3f1228a5765774340937afc8398067dd" integrity sha512-squySRkf+6JGnvjoUtDEjSREJEBirnXi9NqP6rjSYsylxQxqBTz+pkmf395i9E2zsvmYUaI40BHo6SqZUdydlw== -babel-plugin-polyfill-corejs2@^0.1.4: - version "0.1.10" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.1.10.tgz#a2c5c245f56c0cac3dbddbf0726a46b24f0f81d1" - integrity sha512-DO95wD4g0A8KRaHKi0D51NdGXzvpqVLnLu5BTvDlpqUEpTmeEtypgC1xqesORaWmiUOQI14UHKlzNd9iZ2G3ZA== +babel-plugin-polyfill-corejs2@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz#686775bf9a5aa757e10520903675e3889caeedc4" + integrity sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg== dependencies: - "@babel/compat-data" "^7.13.0" - "@babel/helper-define-polyfill-provider" "^0.1.5" + "@babel/compat-data" "^7.13.11" + "@babel/helper-define-polyfill-provider" "^0.2.0" semver "^6.1.1" -babel-plugin-polyfill-corejs3@^0.1.3: - version "0.1.7" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.1.7.tgz#80449d9d6f2274912e05d9e182b54816904befd0" - integrity sha512-u+gbS9bbPhZWEeyy1oR/YaaSpod/KDT07arZHb80aTpl8H5ZBq+uN1nN9/xtX7jQyfLdPfoqI4Rue/MQSWJquw== +babel-plugin-polyfill-corejs3@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz#f4b4bb7b19329827df36ff56f6e6d367026cb7a2" + integrity sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg== dependencies: - "@babel/helper-define-polyfill-provider" "^0.1.5" - core-js-compat "^3.8.1" + "@babel/helper-define-polyfill-provider" "^0.2.0" + core-js-compat "^3.9.1" -babel-plugin-polyfill-regenerator@^0.1.2: - version "0.1.6" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.1.6.tgz#0fe06a026fe0faa628ccc8ba3302da0a6ce02f3f" - integrity sha512-OUrYG9iKPKz8NxswXbRAdSwF0GhRdIEMTloQATJi4bDuFqrXaXcCUT/VGNrr8pBcjMh1RxZ7Xt9cytVJTJfvMg== +babel-plugin-polyfill-regenerator@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz#853f5f5716f4691d98c84f8069c7636ea8da7ab8" + integrity sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg== dependencies: - "@babel/helper-define-polyfill-provider" "^0.1.5" + "@babel/helper-define-polyfill-provider" "^0.2.0" babel-plugin-syntax-object-rest-spread@^6.8.0: version "6.13.0" @@ -2797,9 +2832,9 @@ babylon@^6.18.0: integrity sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ== balanced-match@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity 
sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== base64-js@^1.0.2: version "1.5.1" @@ -3025,16 +3060,16 @@ browserslist@4.10.0: node-releases "^1.1.52" pkg-up "^3.1.0" -browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.14.5, browserslist@^4.16.3, browserslist@^4.6.2, browserslist@^4.6.4, browserslist@^4.9.1: - version "4.16.3" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.3.tgz#340aa46940d7db878748567c5dea24a48ddf3717" - integrity sha512-vIyhWmIkULaq04Gt93txdh+j02yX/JzlyhLYbV3YQCn/zvES3JnY7TifHHvvr1w5hTDluNKMkV05cs4vy8Q7sw== +browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.14.5, browserslist@^4.16.6, browserslist@^4.6.2, browserslist@^4.6.4, browserslist@^4.9.1: + version "4.16.6" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.6.tgz#d7901277a5a88e554ed305b183ec9b0c08f66fa2" + integrity sha512-Wspk/PqO+4W9qp5iUTJsa1B/QrYn1keNCcEP5OvP7WBwT4KaDly0uONYmC6Xa3Z5IqnUgS0KcgLYu1l74x0ZXQ== dependencies: - caniuse-lite "^1.0.30001181" - colorette "^1.2.1" - electron-to-chromium "^1.3.649" + caniuse-lite "^1.0.30001219" + colorette "^1.2.2" + electron-to-chromium "^1.3.723" escalade "^3.1.1" - node-releases "^1.1.70" + node-releases "^1.1.71" bser@2.1.1: version "2.1.1" @@ -3202,10 +3237,10 @@ caniuse-api@^3.0.0: lodash.memoize "^4.1.2" lodash.uniq "^4.5.0" -caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000981, caniuse-lite@^1.0.30001035, caniuse-lite@^1.0.30001109, caniuse-lite@^1.0.30001181: - version "1.0.30001204" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001204.tgz#256c85709a348ec4d175e847a3b515c66e79f2aa" - integrity sha512-JUdjWpcxfJ9IPamy2f5JaRDCaqJOxDzOSKtbdx4rH9VivMd1vIzoPumsJa9LoMIi4Fx2BV2KZOxWhNkBjaYivQ== +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000981, caniuse-lite@^1.0.30001035, caniuse-lite@^1.0.30001109, caniuse-lite@^1.0.30001219: + version "1.0.30001223" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001223.tgz#39b49ff0bfb3ee3587000d2f66c47addc6e14443" + integrity sha512-k/RYs6zc/fjbxTjaWZemeSmOjO0JJV+KguOBA3NwPup8uzxM1cMhR2BD9XmO86GuqaqTCO8CgkgH9Rz//vdDiA== capture-exit@^2.0.0: version "2.0.0" @@ -3245,9 +3280,9 @@ chalk@^1.1.3: supports-color "^2.0.0" chalk@^4.0.0, chalk@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.0.tgz#4e14870a618d9e2edd97dd8345fd9d9dc315646a" - integrity sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A== + version "4.1.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.1.tgz#c80b3fab28bf6371e6863325eee67e618b77e6ad" + integrity sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg== dependencies: ansi-styles "^4.1.0" supports-color "^7.1.0" @@ -3257,29 +3292,29 @@ chardet@^0.7.0: resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== -cheerio-select-tmp@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/cheerio-select-tmp/-/cheerio-select-tmp-0.1.1.tgz#55bbef02a4771710195ad736d5e346763ca4e646" - integrity sha512-YYs5JvbpU19VYJyj+F7oYrIE2BOll1/hRU7rEy/5+v9BzkSo3bK81iAeeQEMI92vRIxz677m72UmJUiVwwgjfQ== +cheerio-select@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/cheerio-select/-/cheerio-select-1.4.0.tgz#3a16f21e37a2ef0f211d6d1aa4eff054bb22cdc9" + integrity 
sha512-sobR3Yqz27L553Qa7cK6rtJlMDbiKPdNywtR95Sj/YgfpLfy0u6CGJuaBKe5YE/vTc23SCRKxWSdlon/w6I/Ew== dependencies: - css-select "^3.1.2" - css-what "^4.0.0" - domelementtype "^2.1.0" - domhandler "^4.0.0" - domutils "^2.4.4" + css-select "^4.1.2" + css-what "^5.0.0" + domelementtype "^2.2.0" + domhandler "^4.2.0" + domutils "^2.6.0" cheerio@^1.0.0-rc.3: - version "1.0.0-rc.5" - resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.5.tgz#88907e1828674e8f9fee375188b27dadd4f0fa2f" - integrity sha512-yoqps/VCaZgN4pfXtenwHROTp8NG6/Hlt4Jpz2FEP0ZJQ+ZUkVDd0hAPDNKhj3nakpfPt/CNs57yEtxD1bXQiw== + version "1.0.0-rc.9" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.9.tgz#a3ae6b7ce7af80675302ff836f628e7cb786a67f" + integrity sha512-QF6XVdrLONO6DXRF5iaolY+odmhj2CLj+xzNod7INPWMi/x9X4SOylH0S/vaPpX+AUU6t04s34SQNh7DbkuCng== dependencies: - cheerio-select-tmp "^0.1.0" - dom-serializer "~1.2.0" - domhandler "^4.0.0" - entities "~2.1.0" - htmlparser2 "^6.0.0" - parse5 "^6.0.0" - parse5-htmlparser2-tree-adapter "^6.0.0" + cheerio-select "^1.4.0" + dom-serializer "^1.3.1" + domhandler "^4.2.0" + htmlparser2 "^6.1.0" + parse5 "^6.0.1" + parse5-htmlparser2-tree-adapter "^6.0.1" + tslib "^2.2.0" "chokidar@>=3.0.0 <4.0.0", chokidar@^3.3.0, chokidar@^3.4.1: version "3.5.1" @@ -3321,11 +3356,9 @@ chownr@^1.1.1, chownr@^1.1.2: integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg== chrome-trace-event@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz#234090ee97c7d4ad1a2c4beae27505deffc608a4" - integrity sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ== - dependencies: - tslib "^1.9.0" + version "1.0.3" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz#1015eced4741e15d06664a957dbbf50d041e26ac" + integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg== ci-info@^2.0.0: version "2.0.0" @@ -3351,9 +3384,9 @@ class-utils@^0.3.5: static-extend "^0.1.1" classnames@^2.2.3: - version "2.2.6" - resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" - integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q== + version "2.3.1" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.3.1.tgz#dfcfa3891e306ec1dad105d0e88f4417b8535e8e" + integrity sha512-OlQdbZ7gLfGarSqxesMesDa5uz7KFbID8Kpq/SxIoNGDqY8lSYs0D+hhtBXhcdB3rcbXArFr7vlHheLk1voeNA== clean-css@^4.2.3: version "4.2.3" @@ -3650,18 +3683,18 @@ copy-to-clipboard@^3: dependencies: toggle-selection "^1.0.6" -core-js-compat@^3.6.2, core-js-compat@^3.8.1, core-js-compat@^3.9.0: - version "3.9.1" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.9.1.tgz#4e572acfe90aff69d76d8c37759d21a5c59bb455" - integrity sha512-jXAirMQxrkbiiLsCx9bQPJFA6llDadKMpYrBJQJ3/c4/vsPP/fAf29h24tviRlvwUL6AmY5CHLu2GvjuYviQqA== +core-js-compat@^3.6.2, core-js-compat@^3.9.0, core-js-compat@^3.9.1: + version "3.12.0" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.12.0.tgz#a031e51fe411085e33cb629bfee2acaa53bc309a" + integrity sha512-vvaN8EOvYBEjrr+MN3vCKrMNc/xdYZI+Rt/uPMROi4T5Hj8Fz6TiPQm2mrB9aZoQVW1lCFHYmMrv99aUct9mkg== dependencies: - browserslist "^4.16.3" + browserslist "^4.16.6" semver "7.0.0" core-js-pure@^3.0.0: - version "3.9.1" - resolved 
"https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.9.1.tgz#677b322267172bd490e4464696f790cbc355bec5" - integrity sha512-laz3Zx0avrw9a4QEIdmIblnVuJz8W51leY9iLThatCsFawWxC3sE4guASC78JbCin+DkwMpCdp1AVAuzL/GN7A== + version "3.12.0" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.12.0.tgz#c59d45954a6569232f0704d085916a5e8c3b272f" + integrity sha512-j2y084taJU4VMUpwuC93l19tsPbTAtOpg6/do3UOwX4eUJbsFdhEaGRQfTYthn5rDubsB88YITtei0Kw46vEQQ== core-js@^2.4.0: version "2.6.12" @@ -3669,9 +3702,9 @@ core-js@^2.4.0: integrity sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ== core-js@^3.5.0: - version "3.9.1" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.9.1.tgz#cec8de593db8eb2a85ffb0dbdeb312cb6e5460ae" - integrity sha512-gSjRvzkxQc1zjM/5paAmL4idJBFzuJoo+jDjF1tStYFMV2ERfD02HhahhCGXUyHxQRG4yFKVSdO6g62eoRMcDg== + version "3.12.0" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.12.0.tgz#62bac86f7d7f087d40dba3e90a211c2c3c8559ea" + integrity sha512-SaMnchL//WwU2Ot1hhkPflE8gzo7uq1FGvUJ8GKmi3TOU7rGTHIU+eir1WGf6qOtTyxdfdcp10yPdGZ59sQ3hw== core-util-is@1.0.2, core-util-is@~1.0.0: version "1.0.2" @@ -3744,9 +3777,9 @@ crelt@^1.0.5: integrity sha512-+BO9wPPi+DWTDcNYhr/W90myha8ptzftZT+LwcmUbbok0rcP/fequmFYCw8NMoH7pkAZQzU78b3kYrlua5a9eA== cross-fetch@^3.0.4: - version "3.1.2" - resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.1.2.tgz#ee0c2f18844c4fde36150c2a4ddc068d20c1bc41" - integrity sha512-+JhD65rDNqLbGmB3Gzs3HrEKC0aQnD+XA3SY6RjgkF88jV2q5cTc5+CwxlS3sdmLk98gpPt5CF9XRnPdlxZe6w== + version "3.1.4" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.1.4.tgz#9723f3a3a247bf8b89039f3a380a9244e8fa2f39" + integrity sha512-1eAtFWdIubi6T4XPy6ei9iUFoKpUkIF971QLN8lIvvvwueI65+Nw5haMNKUwfJxabqlIIDODJKGrQ66gxC0PbQ== dependencies: node-fetch "2.6.1" @@ -3855,15 +3888,15 @@ css-select@^2.0.0, css-select@^2.0.2: domutils "^1.7.0" nth-check "^1.0.2" -css-select@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/css-select/-/css-select-3.1.2.tgz#d52cbdc6fee379fba97fb0d3925abbd18af2d9d8" - integrity sha512-qmss1EihSuBNWNNhHjxzxSfJoFBM/lERB/Q4EnsJQQC62R2evJDW481091oAdOr9uh46/0n4nrg0It5cAnj1RA== +css-select@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-4.1.2.tgz#8b52b6714ed3a80d8221ec971c543f3b12653286" + integrity sha512-nu5ye2Hg/4ISq4XqdLY2bEatAcLIdt3OYGFc9Tm9n7VSlFBcfRv0gBNksHRgSdUDQGtN3XrZ94ztW+NfzkFSUw== dependencies: boolbase "^1.0.0" - css-what "^4.0.0" - domhandler "^4.0.0" - domutils "^2.4.3" + css-what "^5.0.0" + domhandler "^4.2.0" + domutils "^2.6.0" nth-check "^2.0.0" css-tree@1.0.0-alpha.37: @@ -3875,9 +3908,9 @@ css-tree@1.0.0-alpha.37: source-map "^0.6.1" css-tree@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.2.tgz#9ae393b5dafd7dae8a622475caec78d3d8fbd7b5" - integrity sha512-wCoWush5Aeo48GLhfHPbmvZs59Z+M7k5+B1xDnXbdWNcEF423DoFdqSWE0PM5aNk5nI5cp1q7ms36zGApY/sKQ== + version "1.1.3" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.3.tgz#eb4870fb6fd7707327ec95c2ff2ab09b5e8db91d" + integrity sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q== dependencies: mdn-data "2.0.14" source-map "^0.6.1" @@ -3887,10 +3920,10 @@ css-what@^3.2.1: resolved "https://registry.yarnpkg.com/css-what/-/css-what-3.4.2.tgz#ea7026fcb01777edbde52124e21f327e7ae950e4" integrity 
sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ== -css-what@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/css-what/-/css-what-4.0.0.tgz#35e73761cab2eeb3d3661126b23d7aa0e8432233" - integrity sha512-teijzG7kwYfNVsUh2H/YN62xW3KK9YhXEgSlbxMlcyjPNvdKJqFx5lrwlJgoFP1ZHlB89iGDlo/JyshKeRhv5A== +css-what@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-5.0.0.tgz#f0bf4f8bac07582722346ab243f6a35b512cfc47" + integrity sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA== css.escape@^1.5.1: version "1.5.1" @@ -4022,9 +4055,9 @@ cssstyle@^2.3.0: cssom "~0.3.6" csstype@^3.0.2: - version "3.0.7" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.7.tgz#2a5fb75e1015e84dd15692f71e89a1450290950b" - integrity sha512-KxnUB0ZMlnUWCsx2Z8MUsr6qV6ja1w9ArPErJaJaF8a5SOWoHLIszeCTKGRGRgtLgYrs1E8CHkNSP1VZTTPc9g== + version "3.0.8" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.0.8.tgz#d2266a792729fb227cd216fb572f43728e1ad340" + integrity sha512-jXKhWqXPmlUeoQnF/EhTtTl4C9SnrxSH/jZUih3jmO6lBKr99rP3/+FmrMj4EFpOXzMtXHAZkd3x0E6h6Fgflw== cyclist@^1.0.1: version "1.0.1" @@ -4040,9 +4073,9 @@ d@1, d@^1.0.1: type "^1.0.1" damerau-levenshtein@^1.0.4, damerau-levenshtein@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.6.tgz#143c1641cb3d85c60c32329e26899adea8701791" - integrity sha512-JVrozIeElnj3QzfUIt8tB8YMluBJom4Vw9qTPpjGYQ9fYlB3D/rb6OordUxf3xeFB35LKWs0xqcO5U6ySvBtug== + version "1.0.7" + resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.7.tgz#64368003512a1a6992593741a09a9d31a836f55d" + integrity sha512-VvdQIPGdWP0SqFXghj79Wf/5LArmreyMsGLa6FG6iC4t3j7j5s71TrwWmT/4akbDQIqjfACkLZmjXhA7g2oUZw== dashdash@^1.12.0: version "1.14.1" @@ -4319,10 +4352,10 @@ dom-serializer@0: domelementtype "^2.0.1" entities "^2.0.0" -dom-serializer@^1.0.1, dom-serializer@~1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.2.0.tgz#3433d9136aeb3c627981daa385fc7f32d27c48f1" - integrity sha512-n6kZFH/KlCrqs/1GHMOd5i2fd/beQHuehKdWvNNffbGHTr/almdhuVvTVFb3V7fglz+nC50fFusu3lY33h12pA== +dom-serializer@^1.0.1, dom-serializer@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.3.1.tgz#d845a1565d7c041a95e5dab62184ab41e3a519be" + integrity sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q== dependencies: domelementtype "^2.0.1" domhandler "^4.0.0" @@ -4338,10 +4371,10 @@ domelementtype@1, domelementtype@^1.3.1: resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f" integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w== -domelementtype@^2.0.1, domelementtype@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.1.0.tgz#a851c080a6d1c3d94344aed151d99f669edf585e" - integrity sha512-LsTgx/L5VpD+Q8lmsXSHW2WpA+eBlZ9HPf3erD1IoPF00/3JKHZ3BknUVA2QGDNu69ZNmyFmCWBSO45XjYKC5w== +domelementtype@^2.0.1, domelementtype@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.2.0.tgz#9a0b6c2782ed6a1c7323d42267183df9bd8b1d57" + integrity sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A== domexception@^1.0.1: version "1.0.1" @@ 
-4371,12 +4404,12 @@ domhandler@^3.0.0: dependencies: domelementtype "^2.0.1" -domhandler@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.0.0.tgz#01ea7821de996d85f69029e81fa873c21833098e" - integrity sha512-KPTbnGQ1JeEMQyO1iYXoagsI6so/C96HZiFyByU3T6iAzpXn8EGEvct6unm1ZGoed8ByO2oirxgwxBmqKF9haA== +domhandler@^4.0.0, domhandler@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.2.0.tgz#f9768a5f034be60a89a27c2e4d0f74eba0d8b059" + integrity sha512-zk7sgt970kzPks2Bf+dwT/PLzghLnsivb9CcxkvR8Mzr66Olr0Ofd8neSbglHJHaHa2MadfoSdNlKYAaafmWfA== dependencies: - domelementtype "^2.1.0" + domelementtype "^2.2.0" domutils@^1.5.1, domutils@^1.7.0: version "1.7.0" @@ -4386,14 +4419,14 @@ domutils@^1.5.1, domutils@^1.7.0: dom-serializer "0" domelementtype "1" -domutils@^2.0.0, domutils@^2.4.3, domutils@^2.4.4: - version "2.5.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.5.0.tgz#42f49cffdabb92ad243278b331fd761c1c2d3039" - integrity sha512-Ho16rzNMOFk2fPwChGh3D2D9OEHAfG19HgmRR2l+WLSsIstNsAYBzePH412bL0y5T44ejABIVfTHQ8nqi/tBCg== +domutils@^2.0.0, domutils@^2.5.2, domutils@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.6.0.tgz#2e15c04185d43fb16ae7057cb76433c6edb938b7" + integrity sha512-y0BezHuy4MDYxh6OvolXYsH+1EMGmFbwv5FKW7ovwMG6zTPWqNPq3WF9ayZssFq+UlKdffGLbOEaghNdaOm1WA== dependencies: dom-serializer "^1.0.1" - domelementtype "^2.0.1" - domhandler "^4.0.0" + domelementtype "^2.2.0" + domhandler "^4.2.0" dot-case@^3.0.4: version "3.0.4" @@ -4458,10 +4491,10 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= -electron-to-chromium@^1.3.378, electron-to-chromium@^1.3.649: - version "1.3.698" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.698.tgz#5de813960f23581a268718a0058683dffa15d221" - integrity sha512-VEXDzYblnlT+g8Q3gedwzgKOso1evkeJzV8lih7lV8mL8eAnGVnKyC3KsFT6S+R5PQO4ffdr1PI16/ElibY/kQ== +electron-to-chromium@^1.3.378, electron-to-chromium@^1.3.723: + version "1.3.727" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.727.tgz#857e310ca00f0b75da4e1db6ff0e073cc4a91ddf" + integrity sha512-Mfz4FIB4FSvEwBpDfdipRIrwd6uo8gUDoRDF4QEYb4h4tSuI3ov594OrjU6on042UlFHouIJpClDODGkPcBSbg== elliptic@^6.5.3: version "6.5.4" @@ -4532,11 +4565,6 @@ entities@^2.0.0: resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== -entities@~2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5" - integrity sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w== - enzyme-adapter-react-16@^1.15.1: version "1.15.6" resolved "https://registry.yarnpkg.com/enzyme-adapter-react-16/-/enzyme-adapter-react-16-1.15.6.tgz#fd677a658d62661ac5afd7f7f541f141f8085901" @@ -4574,12 +4602,12 @@ enzyme-shallow-equal@^1.0.1, enzyme-shallow-equal@^1.0.4: object-is "^1.1.2" enzyme-to-json@^3.4.3: - version "3.6.1" - resolved "https://registry.yarnpkg.com/enzyme-to-json/-/enzyme-to-json-3.6.1.tgz#d60740950bc7ca6384dfe6fe405494ec5df996bc" - integrity sha512-15tXuONeq5ORoZjV/bUo2gbtZrN2IH+Z6DvL35QmZyKHgbY1ahn6wcnLd9Xv9OjiwbAXiiP8MRZwbZrCv1wYNg== 
+ version "3.6.2" + resolved "https://registry.yarnpkg.com/enzyme-to-json/-/enzyme-to-json-3.6.2.tgz#94f85c413bcae8ab67be53b0a94b69a560e27823" + integrity sha512-Ynm6Z6R6iwQ0g2g1YToz6DWhxVnt8Dy1ijR2zynRKxTyBGA8rCDXU3rs2Qc4OKvUvc2Qoe1bcFK6bnPs20TrTg== dependencies: "@types/cheerio" "^0.22.22" - lodash "^4.17.15" + lodash "^4.17.21" react-is "^16.12.0" enzyme@^3.10.0: @@ -4855,9 +4883,9 @@ eslint-plugin-jsx-a11y@6.x: language-tags "^1.0.5" eslint-plugin-prettier@^3.1.1: - version "3.3.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-3.3.1.tgz#7079cfa2497078905011e6f82e8dd8453d1371b7" - integrity sha512-Rq3jkcFY8RYeQLgk2cCwuc0P7SEFwDravPhsJZOQ5N4YI4DSg50NyqJ/9gdZHzQlHf8MvafSesbNJCcP/FF6pQ== + version "3.4.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-3.4.0.tgz#cdbad3bf1dbd2b177e9825737fe63b476a08f0c7" + integrity sha512-UDK6rJT6INSfcOo545jiaOwB701uAIt2/dR7WnFQoGCVl1/EMqdANBmwUaqqQ45aXprsTGzSa39LI1PyuRBxxw== dependencies: prettier-linter-helpers "^1.0.0" @@ -4890,9 +4918,9 @@ eslint-plugin-react@7.19.0: xregexp "^4.3.0" eslint-plugin-react@7.x: - version "7.23.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.23.1.tgz#f1a2e844c0d1967c822388204a8bc4dee8415b11" - integrity sha512-MvFGhZjI8Z4HusajmSw0ougGrq3Gs4vT/0WgwksZgf5RrLrRa2oYAw56okU4tZJl8+j7IYNuTM+2RnFEuTSdRQ== + version "7.23.2" + resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.23.2.tgz#2d2291b0f95c03728b55869f01102290e792d494" + integrity sha512-AfjgFQB+nYszudkxRkTFu0UR1zEQig0ArVMPloKhxwlwkzaw/fBiH0QWcBBhZONlXqQC51+nfqFrkn4EzHcGBw== dependencies: array-includes "^3.1.3" array.prototype.flatmap "^1.2.4" @@ -5407,9 +5435,9 @@ flush-write-stream@^1.0.0: readable-stream "^2.3.6" follow-redirects@^1.0.0: - version "1.13.3" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.13.3.tgz#e5598ad50174c1bc4e872301e82ac2cd97f90267" - integrity sha512-DUgl6+HDzB0iEptNQEXLx/KhTmDb8tZUHSeLqpnjpknR70H0nC2t9N73BK6fN4hOvJ84pKlIQVQ4k5FFlBedKA== + version "1.14.0" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.0.tgz#f5d260f95c5f8c105894491feee5dc8993b402fe" + integrity sha512-0vRwd7RKQBTt+mgu87mtYeofLFZpTas2S9zY+jIeuLJMNvudIgF52nr19q40HOwH5RrhWIPuj9puybzSJiRrVg== for-each@^0.3.3: version "0.3.3" @@ -5759,9 +5787,9 @@ har-validator@~5.1.3: har-schema "^2.0.0" harmony-reflect@^1.4.6: - version "1.6.1" - resolved "https://registry.yarnpkg.com/harmony-reflect/-/harmony-reflect-1.6.1.tgz#c108d4f2bb451efef7a37861fdbdae72c9bdefa9" - integrity sha512-WJTeyp0JzGtHcuMsi7rw2VwtkvLa+JyfEKJCFyfcS0+CDkjQ5lHPu7zEhFZP+PDSRrEgXa5Ah0l1MbgbE41XjA== + version "1.6.2" + resolved "https://registry.yarnpkg.com/harmony-reflect/-/harmony-reflect-1.6.2.tgz#31ecbd32e648a34d030d86adb67d4d47547fe710" + integrity sha512-HIp/n38R9kQjDEziXyDTuW3vvoxxyxjxFzXLrBr18uB47GnSt+G9D29fqrpM5ZkspMcPICud3XsBJQ4Y2URg8g== has-ansi@^2.0.0: version "2.0.0" @@ -5770,7 +5798,7 @@ has-ansi@^2.0.0: dependencies: ansi-regex "^2.0.0" -has-bigints@^1.0.0: +has-bigints@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.1.tgz#64fe6acb020673e3b78db035a5af69aa9d07b113" integrity sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA== @@ -5785,7 +5813,7 @@ has-flag@^4.0.0: resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity 
sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== -has-symbols@^1.0.0, has-symbols@^1.0.1, has-symbols@^1.0.2: +has-symbols@^1.0.1, has-symbols@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.2.tgz#165d3070c00309752a1236a479331e3ac56f1423" integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw== @@ -5865,9 +5893,9 @@ hmac-drbg@^1.0.1: minimalistic-crypto-utils "^1.0.1" hosted-git-info@^2.1.4: - version "2.8.8" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.8.tgz#7539bd4bc1e0e0a895815a2e0262420b12858488" - integrity sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg== + version "2.8.9" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.9.tgz#dffc0bf9a21c02209090f2aa69429e1414daf3f9" + integrity sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw== hpack.js@^2.1.6: version "2.1.6" @@ -5968,14 +5996,14 @@ htmlparser2@^4.1.0: domutils "^2.0.0" entities "^2.0.0" -htmlparser2@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-6.0.1.tgz#422521231ef6d42e56bd411da8ba40aa36e91446" - integrity sha512-GDKPd+vk4jvSuvCbyuzx/unmXkk090Azec7LovXP8as1Hn8q9p3hbjmDGbUqqhknw0ajwit6LiiWqfiTUPMK7w== +htmlparser2@^6.0.0, htmlparser2@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-6.1.0.tgz#c4d762b6c3371a05dbe65e94ae43a9f845fb8fb7" + integrity sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A== dependencies: domelementtype "^2.0.1" domhandler "^4.0.0" - domutils "^2.4.4" + domutils "^2.5.2" entities "^2.0.0" http-deceiver@^1.2.7: @@ -6309,9 +6337,9 @@ is-arrayish@^0.3.1: integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== is-bigint@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.1.tgz#6923051dfcbc764278540b9ce0e6b3213aa5ebc2" - integrity sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg== + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.2.tgz#ffb381442503235ad245ea89e45b3dbff040ee5a" + integrity sha512-0JV5+SOCQkIdzjBK9buARcV804Ddu7A0Qet6sHi3FimE9ne6m4BGQZfRn+NZiXbBk4F4XmHfDZIipLj9pX8dSA== is-binary-path@^1.0.0: version "1.0.1" @@ -6364,9 +6392,9 @@ is-color-stop@^1.0.0: rgba-regex "^1.0.0" is-core-module@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.2.0.tgz#97037ef3d52224d85163f5597b2b63d9afed981a" - integrity sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ== + version "2.3.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.3.0.tgz#d341652e3408bca69c4671b79a0954a3d349f887" + integrity sha512-xSphU2KG9867tsYdLD4RWQ1VqdFl4HTO9Thf3I/3dLEfr0dbPTWKsuCKrgqMljg4nPE+Gq0VCnzT3gr0CyBmsw== dependencies: has "^1.0.3" @@ -6385,9 +6413,9 @@ is-data-descriptor@^1.0.0: kind-of "^6.0.0" is-date-object@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.2.tgz#bda736f2cd8fd06d32844e7743bfa7494c3bfd7e" - integrity sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g== + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.3.tgz#4c0802ae9c8097939ea8001eaae3c502f3dbe72f" + integrity sha512-tDpEUInNcy2Yw3lNSepK3Wdw1RnXLcIVienz6Ou631Acl15cJyRWK4dgA1vCmOEgIbtOV0W7MHg+AR2Gdg1NXQ== is-descriptor@^0.1.0: version "0.1.6" @@ -6413,9 +6441,9 @@ is-directory@^0.3.1: integrity sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE= is-docker@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.1.1.tgz#4125a88e44e450d384e09047ede71adc2d144156" - integrity sha512-ZOoqiXfEwtGknTiuDEy8pN2CfE3TxMHprvNer1mXiqwkOT77Rw3YVrUQ52EqAOU3QAWDQ+bQdx7HJzrv7LS2Hw== + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" + integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== is-extendable@^0.1.0, is-extendable@^0.1.1: version "0.1.1" @@ -6532,9 +6560,9 @@ is-plain-object@^5.0.0: integrity sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q== is-potential-custom-element-name@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.0.tgz#0c52e54bcca391bb2c494b21e8626d7336c6e397" - integrity sha1-DFLlS8yjkbssSUsh6GJtczbG45c= + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz#171ed6f19e3ac554394edf78caa05784a45bebb5" + integrity sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ== is-regex@^1.0.4, is-regex@^1.0.5, is-regex@^1.1.0, is-regex@^1.1.2: version "1.1.2" @@ -7188,12 +7216,12 @@ jsdom@^14.1.0: xml-name-validator "^3.0.0" jsdom@^16.4.0: - version "16.5.1" - resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-16.5.1.tgz#4ced6bbd7b77d67fb980e64d9e3e6fb900f97dd6" - integrity sha512-pF73EOsJgwZekbDHEY5VO/yKXUkab/DuvrQB/ANVizbr6UAHJsDdHXuotZYwkJSGQl1JM+ivXaqY+XBDDL4TiA== + version "16.5.3" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-16.5.3.tgz#13a755b3950eb938b4482c407238ddf16f0d2136" + integrity sha512-Qj1H+PEvUsOtdPJ056ewXM4UJPCi4hhLA8wpiz9F2YvsRBhuFsXxtrIFAgGBDynQA9isAMGE91PfUYbdMPXuTA== dependencies: abab "^2.0.5" - acorn "^8.0.5" + acorn "^8.1.0" acorn-globals "^6.0.0" cssom "^0.4.4" cssstyle "^2.3.0" @@ -7215,7 +7243,7 @@ jsdom@^16.4.0: webidl-conversions "^6.1.0" whatwg-encoding "^1.0.5" whatwg-mimetype "^2.3.0" - whatwg-url "^8.0.0" + whatwg-url "^8.5.0" ws "^7.4.4" xml-name-validator "^3.0.0" @@ -7324,9 +7352,9 @@ jsx-ast-utils@^2.2.1, jsx-ast-utils@^2.2.3: object.assign "^4.1.2" just-extend@^4.0.2: - version "4.1.1" - resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-4.1.1.tgz#158f1fdb01f128c411dc8b286a7b4837b3545282" - integrity sha512-aWgeGFW67BP3e5181Ep1Fv2v8z//iBJfrvyTnq8wG86vEESwmonn1zPBJ0VfmT9CJq2FIT0VsETtrNFm2a+SHA== + version "4.2.1" + resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-4.2.1.tgz#ef5e589afb61e5d66b24eca749409a8939a8c744" + integrity sha512-g3UB796vUFIY90VIv/WX3L2c8CS2MdWUww3CNrYmqza1Fg0DURc2K/O4YrnklBdQarSJ/y8JnJYDGc+1iumQjg== killable@^1.0.1: version "1.0.1" @@ -7574,7 +7602,7 @@ lodash.sortby@^4.7.0: resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= -lodash.template@^4.4.0, lodash.template@^4.5.0: +lodash.template@^4.4.0: version "4.5.0" resolved 
"https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.5.0.tgz#f976195cf3f347d0d5f52483569fe8031ccce8ab" integrity sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A== @@ -7594,7 +7622,7 @@ lodash.uniq@^4.5.0: resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" integrity sha1-0CJTc662Uq3BvILklFM5qEJ1R3M= -"lodash@>=3.5 <5", lodash@^4.17.11, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.5: +"lodash@>=3.5 <5", lodash@^4.17.11, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.17.5, lodash@^4.7.0: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -7772,17 +7800,17 @@ miller-rabin@^4.0.0: bn.js "^4.0.0" brorand "^1.0.1" -mime-db@1.46.0, "mime-db@>= 1.43.0 < 2": - version "1.46.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.46.0.tgz#6267748a7f799594de3cbc8cde91def349661cee" - integrity sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ== +mime-db@1.47.0, "mime-db@>= 1.43.0 < 2": + version "1.47.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.47.0.tgz#8cb313e59965d3c05cfbf898915a267af46a335c" + integrity sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw== mime-types@^2.1.12, mime-types@~2.1.17, mime-types@~2.1.19, mime-types@~2.1.24: - version "2.1.29" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.29.tgz#1d4ab77da64b91f5f72489df29236563754bb1b2" - integrity sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ== + version "2.1.30" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.30.tgz#6e7be8b4c479825f85ed6326695db73f9305d62d" + integrity sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg== dependencies: - mime-db "1.46.0" + mime-db "1.47.0" mime@1.6.0: version "1.6.0" @@ -8116,7 +8144,7 @@ node-notifier@^5.4.2: shellwords "^0.1.1" which "^1.3.0" -node-releases@^1.1.52, node-releases@^1.1.70: +node-releases@^1.1.52, node-releases@^1.1.71: version "1.1.71" resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.71.tgz#cb1334b179896b1c89ecfdd4b725fb7bbdfc7dbb" integrity sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg== @@ -8219,9 +8247,9 @@ object-hash@^2.0.1: integrity sha512-VOJmgmS+7wvXf8CjbQmimtCnEx3IAoLxI3fp2fbWehxrWBcAQFbk+vcwb6vzR0VZv/eNCJ/27j151ZTwqW/JeQ== object-inspect@^1.7.0, object-inspect@^1.9.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.9.0.tgz#c90521d74e1127b67266ded3394ad6116986533a" - integrity sha512-i3Bp9iTqwhaLZBxGkRfo5ZbE07BQRT7MGu8+nNgwW9ItGp1TzCTw2DLEoWwjClxBjOFI/hWljTAmYGCEwmtnOw== + version "1.10.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.10.2.tgz#b6385a3e2b7cae0b5eafcf90cddf85d128767f30" + integrity sha512-gz58rdPpadwztRrPjZE9DZLOABUpTGdcANUgOwBFO1C+HZZhePoP83M65WGDmbpwFYJSWqavbl4SgDn4k8RYTA== object-is@^1.0.1, object-is@^1.0.2, object-is@^1.1.2: version "1.1.5" @@ -8533,7 +8561,7 @@ parse-srcset@^1.0.2: resolved 
"https://registry.yarnpkg.com/parse-srcset/-/parse-srcset-1.0.2.tgz#f2bd221f6cc970a938d88556abc589caaaa2bde1" integrity sha1-8r0iH2zJcKk42IVWq8WJyqqiveE= -parse5-htmlparser2-tree-adapter@^6.0.0: +parse5-htmlparser2-tree-adapter@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== @@ -8550,7 +8578,7 @@ parse5@5.1.0: resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.0.tgz#c59341c9723f414c452975564c7c00a68d58acd2" integrity sha512-fxNG2sQjHvlVAYmzBZS9YlDp6PTSSDwa98vkD4QgVDDCAo84z5X1t5XyJQ62ImdLXx5NdIIfihey6xpum9/gRQ== -parse5@6.0.1, parse5@^6.0.0, parse5@^6.0.1: +parse5@6.0.1, parse5@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== @@ -8657,9 +8685,9 @@ path-type@^4.0.0: integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== pbkdf2@^3.0.3: - version "3.1.1" - resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.1.tgz#cb8724b0fada984596856d1a6ebafd3584654b94" - integrity sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg== + version "3.1.2" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.2.tgz#dd822aa0887580e52f1a039dc3eda108efae3075" + integrity sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA== dependencies: create-hash "^1.1.2" create-hmac "^1.1.4" @@ -8673,9 +8701,9 @@ performance-now@^2.1.0: integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= picomatch@^2.0.4, picomatch@^2.2.1: - version "2.2.2" - resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" - integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg== + version "2.2.3" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.3.tgz#465547f359ccc206d3c48e46a1bcb89bf7ee619d" + integrity sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg== pify@^2.0.0: version "2.3.0" @@ -8981,11 +9009,10 @@ postcss-image-set-function@^3.0.1: postcss-values-parser "^2.0.0" postcss-initial@^3.0.0: - version "3.0.2" - resolved "https://registry.yarnpkg.com/postcss-initial/-/postcss-initial-3.0.2.tgz#f018563694b3c16ae8eaabe3c585ac6319637b2d" - integrity sha512-ugA2wKonC0xeNHgirR4D3VWHs2JcU08WAi1KFLVcnb7IN89phID6Qtg2RIctWbnvp1TM2BOmDtX8GGLCKdR8YA== + version "3.0.4" + resolved "https://registry.yarnpkg.com/postcss-initial/-/postcss-initial-3.0.4.tgz#9d32069a10531fe2ecafa0b6ac750ee0bc7efc53" + integrity sha512-3RLn6DIpMsK1l5UUy9jxQvoDeUN4gP939tDcKUHD/kM8SGSKbFAnvkpFpj3Bhtz3HGk1jWY5ZNWX6mPta5M9fg== dependencies: - lodash.template "^4.5.0" postcss "^7.0.2" postcss-lab-function@^2.0.1: @@ -9374,13 +9401,11 @@ postcss-selector-parser@^5.0.0-rc.3, postcss-selector-parser@^5.0.0-rc.4: uniq "^1.0.1" postcss-selector-parser@^6.0.0, postcss-selector-parser@^6.0.2: - version "6.0.4" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz#56075a1380a04604c38b063ea7767a129af5c2b3" - integrity sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw== + version 
"6.0.5" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.5.tgz#042d74e137db83e6f294712096cb413f5aa612c4" + integrity sha512-aFYPoYmXbZ1V6HZaSvat08M97A8HqO6Pjz+PiNpw/DhuRrC72XWAdp3hL6wusDCN31sSmcZyMGa2hZEuX+Xfhg== dependencies: cssesc "^3.0.0" - indexes-of "^1.0.1" - uniq "^1.0.1" util-deprecate "^1.0.2" postcss-svgo@^4.0.3: @@ -9439,9 +9464,9 @@ postcss@^7, postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.14, postcss@^7.0.17, po supports-color "^6.1.0" postcss@^8.0.2: - version "8.2.12" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.2.12.tgz#81248a1a87e0f575cc594a99a08207fd1c4addc4" - integrity sha512-BJnGT5+0q2tzvs6oQfnY2NpEJ7rIXNfBnZtQOKCIsweeWXBXeDd5k31UgTdS3d/c02ouspufn37mTaHWkJyzMQ== + version "8.2.14" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.2.14.tgz#dcf313eb8247b3ce8078d048c0e8262ca565ad2b" + integrity sha512-+jD0ZijcvyCqPQo/m/CW0UcARpdFylq04of+Q7RKX6f/Tu+dvpUI/9Sp81+i6/vJThnOBX09Quw0ZLOVwpzX3w== dependencies: colorette "^1.2.2" nanoid "^3.1.22" @@ -9535,9 +9560,9 @@ promise@^8.0.3: asap "~2.0.6" prompts@^2.0.1: - version "2.4.0" - resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.0.tgz#4aa5de0723a231d1ee9121c40fdf663df73f61d7" - integrity sha512-awZAKrk3vN6CroQukBL+R9051a4R3zCZBlJm/HBfrSZ8iTpYix3VX1vU4mveiLpiwmOJT4wokTF9m6HUk4KqWQ== + version "2.4.1" + resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.1.tgz#befd3b1195ba052f9fd2fde8a486c4e82ee77f61" + integrity sha512-EQyfIuO2hPDsX1L/blblV+H7I0knhgAd82cVneCwcdND9B8AuCDuRcBH6yIcG4dFzlOUqbazQqwGjx5xmsNLuQ== dependencies: kleur "^3.0.3" sisteransi "^1.0.5" @@ -9669,9 +9694,9 @@ querystringify@^2.1.1: integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== raf-schd@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/raf-schd/-/raf-schd-4.0.2.tgz#bd44c708188f2e84c810bf55fcea9231bcaed8a0" - integrity sha512-VhlMZmGy6A6hrkJWHLNTGl5gtgMUm+xfGza6wbwnE914yeQ5Ybm18vgM734RZhMgfw4tacUrWseGZlpUrrakEQ== + version "4.0.3" + resolved "https://registry.yarnpkg.com/raf-schd/-/raf-schd-4.0.3.tgz#5d6c34ef46f8b2a0e880a8fcdb743efc5bfdbc1a" + integrity sha512-tQkJl2GRWh83ui2DiPTJz9wEiMN20syf+5oKfB03yYP7ioZcJwsIK8FjrtLwH1m7C7e+Tt2yYBlrOpdT+dyeIQ== raf@^3.4.1: version "3.4.1" @@ -10123,9 +10148,9 @@ renderkid@^2.0.4: strip-ansi "^3.0.0" repeat-element@^1.1.2: - version "1.1.3" - resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" - integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== + version "1.1.4" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.4.tgz#be681520847ab58c7568ac75fbfad28ed42d39e9" + integrity sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ== repeat-string@^1.6.1: version "1.6.1" @@ -10349,9 +10374,9 @@ run-queue@^1.0.0, run-queue@^1.0.3: aproba "^1.1.1" rxjs@^6.5.3, rxjs@^6.6.0: - version "6.6.6" - resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.6.tgz#14d8417aa5a07c5e633995b525e1e3c0dec03b70" - integrity sha512-/oTwee4N4iWzAMAL9xdGKjkEHmIwupR3oXbQjCKywF1BeFohswF3vZdogbmEF6pZkOsXTzWkrZszrWpQTByYVg== + version "6.6.7" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.7.tgz#90ac018acabf491bf65044235d5863c4dab804c9" + integrity sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ== dependencies: tslib "^1.9.0" @@ -10479,9 +10504,9 @@ 
select-hose@^2.0.0: integrity sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo= selfsigned@^1.10.7: - version "1.10.8" - resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.8.tgz#0d17208b7d12c33f8eac85c41835f27fc3d81a30" - integrity sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w== + version "1.10.11" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.11.tgz#24929cd906fe0f44b6d01fb23999a739537acbe9" + integrity sha512-aVmbPOfViZqOZPgRBT0+3u4yZFHpmnIghLMlAcb5/xhp5ZtB/RVnKhz5vl2M32CLXAqR4kha9zfhNg0Lf/sxKA== dependencies: node-forge "^0.10.0" @@ -10881,9 +10906,9 @@ sshpk@^1.7.0: tweetnacl "~0.14.0" ssri@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ssri/-/ssri-6.0.1.tgz#2a3c41b28dd45b62b63676ecb74001265ae9edd8" - integrity sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA== + version "6.0.2" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-6.0.2.tgz#157939134f20464e7301ddba3e90ffa8f7728ac5" + integrity sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q== dependencies: figgy-pudding "^3.5.1" @@ -10901,9 +10926,9 @@ stable@^0.1.8: integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== stack-utils@^1.0.1: - version "1.0.4" - resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-1.0.4.tgz#4b600971dcfc6aed0cbdf2a8268177cc916c87c8" - integrity sha512-IPDJfugEGbfizBwBZRZ3xpccMdRyP5lqsBWXGQWimVjua/ccLCeMOAVjlc1R7LxFjo5sEDhyNIXd8mo/AiDS9w== + version "1.0.5" + resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-1.0.5.tgz#a19b0b01947e0029c8e451d5d61a498f5bb1471b" + integrity sha512-KZiTzuV3CnSnSvgMRrARVCj+Ht7rMbauGDK0LdVFRGyenwdylpajAp4Q0i6SX8rEmbTpMMf6ryq2gb8pPq2WgQ== dependencies: escape-string-regexp "^2.0.0" @@ -11429,10 +11454,10 @@ tslib@^1.8.1, tslib@^1.9.0: resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== -tslib@^2.0.3: - version "2.1.0" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.1.0.tgz#da60860f1c2ecaa5703ab7d39bc05b6bf988b97a" - integrity sha512-hcVC3wYEziELGGmEEXue7D75zbwIIVUMWAVbHItGPx0ziyXxrOMQx4rQEVEV45Ut/1IotuEvwqPopzIOkDMf0A== +tslib@^2.0.3, tslib@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.2.0.tgz#fb2c475977e35e241311ede2693cee1ec6698f5c" + integrity sha512-gS9GVHRU+RGn5KQM2rllAlR3dU6m7AcpJKdtH8gFvQiC4Otgk98XnmMU+nZenHt/+VhnBPWwgrJsyrdcw6i23w== tsutils@^3.17.1: version "3.21.0" @@ -11470,10 +11495,10 @@ type-detect@4.0.8, type-detect@^4.0.8: resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== -type-fest@^0.11.0: - version "0.11.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.11.0.tgz#97abf0872310fed88a5c466b25681576145e33f1" - integrity sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ== +type-fest@^0.21.3: + version "0.21.3" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" + integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== type-fest@^0.8.1: version "0.8.1" @@ -11514,14 
+11539,14 @@ typescript@^3.3.3: integrity sha512-kdMjTiekY+z/ubJCATUPlRDl39vXYiMV9iyeMuEuXZh2we6zz80uovNN2WlAxmmdE/Z/YQe+EbOEXB5RHEED3w== unbox-primitive@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.0.tgz#eeacbc4affa28e9b3d36b5eaeccc50b3251b1d3f" - integrity sha512-P/51NX+JXyxK/aigg1/ZgyccdAxm5K1+n8+tvqSntjOivPt19gvm1VC49RWYetsiub8WViUchdxl/KWHHB0kzA== + version "1.0.1" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.1.tgz#085e215625ec3162574dc8859abee78a59b14471" + integrity sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw== dependencies: function-bind "^1.1.1" - has-bigints "^1.0.0" - has-symbols "^1.0.0" - which-boxed-primitive "^1.0.1" + has-bigints "^1.0.1" + has-symbols "^1.0.2" + which-boxed-primitive "^1.0.2" unicode-canonical-property-names-ecmascript@^1.0.4: version "1.0.4" @@ -11998,16 +12023,16 @@ whatwg-url@^7.0.0: tr46 "^1.0.1" webidl-conversions "^4.0.2" -whatwg-url@^8.0.0: - version "8.4.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-8.4.0.tgz#50fb9615b05469591d2b2bd6dfaed2942ed72837" - integrity sha512-vwTUFf6V4zhcPkWp/4CQPr1TW9Ml6SF4lVyaIMBdJw5i6qUUJ1QWM4Z6YYVkfka0OUIzVo/0aNtGVGk256IKWw== +whatwg-url@^8.0.0, whatwg-url@^8.5.0: + version "8.5.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-8.5.0.tgz#7752b8464fc0903fec89aa9846fc9efe07351fd3" + integrity sha512-fy+R77xWv0AiqfLl4nuGUlQ3/6b5uNfQ4WAbGQVMYshCTCCPK9psC1nWh3XHuxGVCtlcDDQPQW1csmmIQo+fwg== dependencies: - lodash.sortby "^4.7.0" + lodash "^4.7.0" tr46 "^2.0.2" webidl-conversions "^6.1.0" -which-boxed-primitive@^1.0.1: +which-boxed-primitive@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6" integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg== @@ -12236,9 +12261,9 @@ ws@^6.1.2, ws@^6.2.1: async-limiter "~1.0.0" ws@^7.4.4: - version "7.4.4" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.4.tgz#383bc9742cb202292c9077ceab6f6047b17f2d59" - integrity sha512-Qm8k8ojNQIMx7S+Zp8u/uHOx7Qazv3Yv4q68MiWWWOJhiwG5W3x7iqmRtJo8xxrciZUY4vRxUTJCKuRnF28ZZw== + version "7.4.5" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.5.tgz#a484dd851e9beb6fdb420027e3885e8ce48986c1" + integrity sha512-xzyu3hFvomRfXKH8vOFMU3OguG6oOvhXMo3xsGy3xWExqaM2dxBbVxuD99O7m3ZUFMvvscsZDqxfgMaRr/Nr1g== xml-name-validator@^3.0.0: version "3.0.0" @@ -12263,9 +12288,9 @@ xtend@^4.0.0, xtend@~4.0.1: integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== y18n@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.1.tgz#8db2b83c31c5d75099bb890b23f3094891e247d4" - integrity sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ== + version "4.0.3" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" + integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== yallist@^3.0.2: version "3.1.1" From 4b49ffbad5d9851f07e9151697489bf04f4f5716 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Thu, 6 May 2021 22:18:59 +0200 Subject: [PATCH 16/34] Stop the bleed on chunk mapper panic (#8723) * Added test to reproduce panic on TSDB head chunks truncated while querying Signed-off-by: Marco Pracucci * Added test for Querier 
too Signed-off-by: Marco Pracucci * Stop the bleed on mmap-ed head chunks panic Signed-off-by: Marco Pracucci * Lower memory pressure in tests to ensure it doesn't OOM Signed-off-by: Marco Pracucci * Skip TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks Signed-off-by: Marco Pracucci * Experiment to not trigger runtime.GC() continuously Signed-off-by: Marco Pracucci * Try to fix test in CI Signed-off-by: Marco Pracucci * Do not call runtime.GC() at all Signed-off-by: Marco Pracucci * I have no idea why it's failing in CI, skipping tests Signed-off-by: Marco Pracucci --- tsdb/chunks/head_chunks.go | 9 +- tsdb/db_test.go | 258 +++++++++++++++++++++++++++++++++++++ 2 files changed, 266 insertions(+), 1 deletion(-) diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index d5386f7ea..bc6915c80 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -556,7 +556,14 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) { // The chunk data itself. chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd) - chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkData) + + // Make a copy of the chunk data to prevent a panic occurring because the returned + // chunk data slice references an mmap-ed file which could be closed after the + // function returns but while the chunk is still in use. + chkDataCopy := make([]byte, len(chkData)) + copy(chkDataCopy, chkData) + + chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkDataCopy) if err != nil { return nil, &CorruptionErr{ Dir: cdm.dir.Name(), diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 502f2c556..8c0808e90 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -27,6 +27,7 @@ import ( "path/filepath" "sort" "strconv" + "strings" "sync" "testing" "time" @@ -41,6 +42,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/index" @@ -3122,3 +3124,259 @@ func TestNoPanicOnTSDBOpenError(t *testing.T) { require.NoError(t, lockf.Release()) } + +func TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) { + t.Skip("TODO: investigate why process crash in CI") + + const numRuns = 5 + + for i := 1; i <= numRuns; i++ { + t.Run(strconv.Itoa(i), func(t *testing.T) { + testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t) + }) + } +} + +func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) { + const ( + numSeries = 1000 + numStressIterations = 10000 + minStressAllocationBytes = 128 * 1024 + maxStressAllocationBytes = 512 * 1024 + ) + + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + // Disable compactions so we can control it. + db.DisableCompactions() + + // Generate the metrics we're going to append. + metrics := make([]labels.Labels, 0, numSeries) + for i := 0; i < numSeries; i++ { + metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", i)}}) + } + + // Push 1 sample every 15s for 2x the block duration period. 
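The heart of this patch is the defensive copy in head_chunks.go above: the slice returned by byteSlice.Range aliases an mmap-ed file, so if head truncation closes that mapping while a query still holds the chunk, any later read through the slice can fault. Below is a minimal standalone sketch of the same copy-before-return pattern; the detach helper is illustrative only and not part of the ChunkDiskMapper API.

package main

import "fmt"

// detach copies bytes out of a buffer that may alias an mmap-ed region.
// The copy lives on the Go heap, so it stays readable even if the
// underlying file mapping is closed by a concurrent head truncation.
func detach(mapped []byte) []byte {
	out := make([]byte, len(mapped))
	copy(out, mapped)
	return out
}

func main() {
	mapped := []byte("raw chunk bytes") // stand-in for mmapFile.byteSlice.Range(...)
	chk := detach(mapped)
	// Even if the mapping behind `mapped` were unmapped at this point,
	// chk would remain valid.
	fmt.Println(string(chk))
}

The cost is one allocation and copy per chunk read; the surrounding tests exercise the truncation race this copy is meant to survive.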
+ ctx := context.Background() + interval := int64(15 * time.Second / time.Millisecond) + ts := int64(0) + + for ; ts < 2*DefaultBlockDuration; ts += interval { + app := db.Appender(ctx) + + for _, metric := range metrics { + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + } + + require.NoError(t, app.Commit()) + } + + // Compact the TSDB head for the first time. We expect the head chunks file has been cut. + require.NoError(t, db.Compact()) + require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal)) + + // Push more samples for another 1x block duration period. + for ; ts < 3*DefaultBlockDuration; ts += interval { + app := db.Appender(ctx) + + for _, metric := range metrics { + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + } + + require.NoError(t, app.Commit()) + } + + // At this point we expect 2 mmap-ed head chunks. + + // Get a querier and make sure it's closed only once the test is over. + querier, err := db.Querier(ctx, 0, math.MaxInt64) + require.NoError(t, err) + defer func() { + require.NoError(t, querier.Close()) + }() + + // Query back all series. + hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval} + seriesSet := querier.Select(true, hints, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+")) + + // Fetch samples iterators from all series. + var iterators []chunkenc.Iterator + actualSeries := 0 + for seriesSet.Next() { + actualSeries++ + + // Get the iterator and call Next() so that we're sure the chunk is loaded. + it := seriesSet.At().Iterator() + it.Next() + it.At() + + iterators = append(iterators, it) + } + require.NoError(t, seriesSet.Err()) + require.Equal(t, actualSeries, numSeries) + + // Compact the TSDB head again. + require.NoError(t, db.Compact()) + require.Equal(t, float64(2), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal)) + + // At this point we expect 1 head chunk has been deleted. + + // Stress the memory and call GC. This is required to increase the chances + // the chunk memory area is released to the kernel. + var buf []byte + for i := 0; i < numStressIterations; i++ { + //nolint:staticcheck + buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...) + if i%1000 == 0 { + buf = nil + } + } + + // Iterate samples. Here we're summing it just to make sure no golang compiler + // optimization triggers in case we discard the result of it.At(). + var sum float64 + var firstErr error + for _, it := range iterators { + for it.Next() { + _, v := it.At() + sum += v + } + + if err := it.Err(); err != nil { + firstErr = err + } + } + + // After having iterated all samples we also want to be sure no error occurred or + // the "cannot populate chunk XXX: not found" error occurred. This error can occur + // when the iterator tries to fetch an head chunk which has been offloaded because + // of the head compaction in the meanwhile. 
+ if firstErr != nil && !strings.Contains(firstErr.Error(), "cannot populate chunk") { + t.Fatalf("unexpected error: %s", firstErr.Error()) + } +} + +func TestChunkQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) { + t.Skip("TODO: investigate why process crash in CI") + + const numRuns = 5 + + for i := 1; i <= numRuns; i++ { + t.Run(strconv.Itoa(i), func(t *testing.T) { + testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t) + }) + } +} + +func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) { + const ( + numSeries = 1000 + numStressIterations = 10000 + minStressAllocationBytes = 128 * 1024 + maxStressAllocationBytes = 512 * 1024 + ) + + db := openTestDB(t, nil, nil) + defer func() { + require.NoError(t, db.Close()) + }() + + // Disable compactions so we can control it. + db.DisableCompactions() + + // Generate the metrics we're going to append. + metrics := make([]labels.Labels, 0, numSeries) + for i := 0; i < numSeries; i++ { + metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", i)}}) + } + + // Push 1 sample every 15s for 2x the block duration period. + ctx := context.Background() + interval := int64(15 * time.Second / time.Millisecond) + ts := int64(0) + + for ; ts < 2*DefaultBlockDuration; ts += interval { + app := db.Appender(ctx) + + for _, metric := range metrics { + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + } + + require.NoError(t, app.Commit()) + } + + // Compact the TSDB head for the first time. We expect the head chunks file has been cut. + require.NoError(t, db.Compact()) + require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal)) + + // Push more samples for another 1x block duration period. + for ; ts < 3*DefaultBlockDuration; ts += interval { + app := db.Appender(ctx) + + for _, metric := range metrics { + _, err := app.Append(0, metric, ts, float64(ts)) + require.NoError(t, err) + } + + require.NoError(t, app.Commit()) + } + + // At this point we expect 2 mmap-ed head chunks. + + // Get a querier and make sure it's closed only once the test is over. + querier, err := db.ChunkQuerier(ctx, 0, math.MaxInt64) + require.NoError(t, err) + defer func() { + require.NoError(t, querier.Close()) + }() + + // Query back all series. + hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval} + seriesSet := querier.Select(true, hints, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+")) + + // Iterate all series and get their chunks. + var chunks []chunkenc.Chunk + actualSeries := 0 + for seriesSet.Next() { + actualSeries++ + for it := seriesSet.At().Iterator(); it.Next(); { + chunks = append(chunks, it.At().Chunk) + } + } + require.NoError(t, seriesSet.Err()) + require.Equal(t, actualSeries, numSeries) + + // Compact the TSDB head again. + require.NoError(t, db.Compact()) + require.Equal(t, float64(2), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal)) + + // At this point we expect 1 head chunk has been deleted. + + // Stress the memory and call GC. This is required to increase the chances + // the chunk memory area is released to the kernel. + var buf []byte + for i := 0; i < numStressIterations; i++ { + //nolint:staticcheck + buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...) 
+ if i%1000 == 0 { + buf = nil + } + } + + // Iterate chunks and read their bytes slice. Here we're computing the CRC32 + // just to iterate through the bytes slice. We don't really care about the reason why + // we read this data; we just need to read it to make sure the memory address + // of the []byte is still valid. + chkCRC32 := newCRC32() + for _, chunk := range chunks { + chkCRC32.Reset() + _, err := chkCRC32.Write(chunk.Bytes()) + require.NoError(t, err) + } +} From 8fd73b1d281d3227c337b291e8c797a85251d3d5 Mon Sep 17 00:00:00 2001 From: Callum Styan Date: Thu, 6 May 2021 13:53:52 -0700 Subject: [PATCH 17/34] Add Exemplar Remote Write support (#8296) * Write exemplars to the WAL and send them over remote write. Signed-off-by: Callum Styan * Update example for exemplars, print data in a more obvious format. Signed-off-by: Callum Styan * Add metrics for remote write of exemplars. Signed-off-by: Callum Styan * Fix incorrect slices passed to send in remote write. Signed-off-by: Callum Styan * We need to unregister the new metrics. Signed-off-by: Callum Styan * Address review comments Signed-off-by: Callum Styan * Order of exemplar append vs write exemplar to WAL needs to change. Signed-off-by: Callum Styan * Several fixes to prevent sending uninitialized or incorrect samples with an exemplar. Fix dropping exemplar for missing series. Add tests for queue_manager sending exemplars Signed-off-by: Martin Disibio * Store both samples and exemplars in the same timeseries buffer to remove the alloc when building final request, keep sub-slices in separate buffers for re-use Signed-off-by: Martin Disibio * Condense sample/exemplar delivery tests to parameterized sub-tests Signed-off-by: Martin Disibio * Rename test methods for clarity now that they also handle exemplars Signed-off-by: Martin Disibio * Rename counter variable. Fix instances where metrics were not updated correctly Signed-off-by: Martin Disibio * Add exemplars to LoadWAL benchmark Signed-off-by: Callum Styan * last exemplars timestamp metric needs to convert value to seconds with ms precision Signed-off-by: Callum Styan * Process exemplar records in a separate go routine when loading the WAL. Signed-off-by: Callum Styan * Address review comments related to clarifying comments and variable names. Also refactor sample/exemplar to enqueue prompb types. Signed-off-by: Callum Styan * Regenerate types proto with comments, update protoc version again. Signed-off-by: Callum Styan * Put remote write of exemplars behind a feature flag. Signed-off-by: Callum Styan * Address some of Ganesh's review comments. Signed-off-by: Callum Styan * Move exemplar remote write feature flag to a config file field. Signed-off-by: Callum Styan * Address Bartek's review comments. Signed-off-by: Callum Styan * Don't allocate exemplar buffers in queue_manager if we're not going to send exemplars over remote write. Signed-off-by: Callum Styan * Add ValidateExemplar function, validate exemplars when appending to head and log them all to WAL before adding them to exemplar storage. Signed-off-by: Callum Styan * Address more review comments from Ganesh. Signed-off-by: Callum Styan * Add exemplar total label length check.
Signed-off-by: Callum Styan * Address a few last review comments Signed-off-by: Callum Styan Co-authored-by: Martin Disibio --- .circleci/config.yml | 2 +- config/config.go | 1 + docs/configuration/configuration.md | 3 + docs/disabled_features.md | 3 +- .../example_write_adapter/server.go | 10 +- pkg/exemplar/exemplar.go | 10 +- prompb/types.pb.go | 444 +++++++++++++++--- prompb/types.proto | 14 + promql/test.go | 15 +- scrape/scrape.go | 1 - scripts/genproto.sh | 4 +- storage/interface.go | 3 + storage/remote/queue_manager.go | 398 +++++++++++----- storage/remote/queue_manager_test.go | 217 ++++++--- storage/remote/write.go | 12 +- tsdb/docs/format/wal.md | 32 ++ tsdb/exemplar.go | 78 ++- tsdb/exemplar_test.go | 88 +++- tsdb/head.go | 97 +++- tsdb/head_test.go | 148 +++--- tsdb/record/record.go | 84 +++- tsdb/record/record_test.go | 19 + tsdb/wal/checkpoint.go | 36 +- tsdb/wal/checkpoint_test.go | 11 + tsdb/wal/watcher.go | 36 +- tsdb/wal/watcher_test.go | 38 +- 26 files changed, 1443 insertions(+), 361 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d3afc6ec5..2f1f260c8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -41,7 +41,7 @@ jobs: GOMAXPROCS: "2" GO111MODULE: "on" - prometheus/check_proto: - version: "3.12.3" + version: "3.15.8" - prometheus/store_artifact: file: prometheus - prometheus/store_artifact: diff --git a/config/config.go b/config/config.go index e4045eb05..d71f6b72e 100644 --- a/config/config.go +++ b/config/config.go @@ -632,6 +632,7 @@ type RemoteWriteConfig struct { Headers map[string]string `yaml:"headers,omitempty"` WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` Name string `yaml:"name,omitempty"` + SendExemplars bool `yaml:"send_exemplars,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index ac45fa82f..62208ab77 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -2141,6 +2141,9 @@ write_relabel_configs: # remote write configs. [ name: <string> ] +# Enables sending of exemplars over remote write. Note that exemplar storage itself must be enabled for exemplars to be scraped in the first place. +[ send_exemplars: <boolean> | default = false ] + # Sets the `Authorization` header on every remote write request with the # configured username and password. # password and password_file are mutually exclusive. diff --git a/docs/disabled_features.md b/docs/disabled_features.md index 65abd296d..4066327a3 100644 --- a/docs/disabled_features.md +++ b/docs/disabled_features.md @@ -52,5 +52,4 @@ The remote write receiver allows Prometheus to accept remote write requests from [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case is IDs of program traces. -Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=` uses roughly 100 bytes of memory via the in-memory exemplar storage.
- +Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration). \ No newline at end of file diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go index c61ed7b41..dc2bd0e70 100644 --- a/documentation/examples/remote_storage/example_write_adapter/server.go +++ b/documentation/examples/remote_storage/example_write_adapter/server.go @@ -39,7 +39,15 @@ func main() { fmt.Println(m) for _, s := range ts.Samples { - fmt.Printf(" %f %d\n", s.Value, s.Timestamp) + fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp) + } + + for _, e := range ts.Exemplars { + m := make(model.Metric, len(e.Labels)) + for _, l := range e.Labels { + m[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp) } } }) diff --git a/pkg/exemplar/exemplar.go b/pkg/exemplar/exemplar.go index 8e3e01b5c..27ba64d4b 100644 --- a/pkg/exemplar/exemplar.go +++ b/pkg/exemplar/exemplar.go @@ -15,12 +15,16 @@ package exemplar import "github.com/prometheus/prometheus/pkg/labels" +// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +const ExemplarMaxLabelSetLength = 128 + // Exemplar is additional information associated with a time series. type Exemplar struct { - Labels labels.Labels - Value float64 + Labels labels.Labels `json:"labels"` + Value float64 `json:"value"` + Ts int64 `json:"timestamp"` HasTs bool - Ts int64 } type QueryResult struct { diff --git a/prompb/types.pb.go b/prompb/types.pb.go index e09215ada..b29170e53 100644 --- a/prompb/types.pb.go +++ b/prompb/types.pb.go @@ -96,7 +96,7 @@ func (x LabelMatcher_Type) String() string { } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5, 0} + return fileDescriptor_d938547f84707355, []int{6, 0} } // We require this to match chunkenc.Encoding. @@ -122,7 +122,7 @@ func (x Chunk_Encoding) String() string { } func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7, 0} + return fileDescriptor_d938547f84707355, []int{8, 0} } type MetricMetadata struct { @@ -199,7 +199,9 @@ func (m *MetricMetadata) GetUnit() string { } type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp is in ms format, see pkg/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -253,20 +255,89 @@ func (m *Sample) GetTimestamp() int64 { return 0 } -// TimeSeries represents samples and labels for a single time series. 
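The regenerated prompb code here adds the Exemplar message and, just below, an Exemplars field on TimeSeries. A small sketch of how these generated types compose into a remote-write payload; the metric name, trace ID, and timestamps are invented for illustration, timestamps are in milliseconds, and an exemplar's combined label names and values must stay within the 128 UTF-8 characters allowed by ExemplarMaxLabelSetLength:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/prompb"
)

func main() {
	ts := prompb.TimeSeries{
		// Labels are required for the series (and its exemplars) to be
		// ingested by the remote system.
		Labels: []prompb.Label{
			{Name: "__name__", Value: "http_requests_total"},
			{Name: "job", Value: "api"},
		},
		// Timestamps are in ms format, per pkg/timestamp/timestamp.go.
		Samples: []prompb.Sample{{Value: 42, Timestamp: 1620000000000}},
		// Exemplar labels are optional; here they carry a trace ID.
		Exemplars: []prompb.Exemplar{{
			Labels:    []prompb.Label{{Name: "traceID", Value: "0123456789abcdef"}},
			Value:     42,
			Timestamp: 1620000000000,
		}},
	}
	fmt.Println(ts.String())
}

On the receiving side, the updated example_write_adapter server above walks ts.Exemplars the same way it walks ts.Samples.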
-type TimeSeries struct { - Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` +type Exemplar struct { + // Optional, can be empty. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp is in ms format, see pkg/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// TimeSeries represents samples and labels for a single time series. +type TimeSeries struct { + // For a timeseries to be valid, and for the samples and exemplars + // to be ingested by the remote system properly, the labels field is required. 
+ Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{2} + return fileDescriptor_d938547f84707355, []int{3} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -309,6 +380,13 @@ func (m *TimeSeries) GetSamples() []Sample { return nil } +func (m *TimeSeries) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + type Label struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -321,7 +399,7 @@ func (m *Label) Reset() { *m = Label{} } func (m *Label) String() string { return proto.CompactTextString(m) } func (*Label) ProtoMessage() {} func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{4} } func (m *Label) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -375,7 +453,7 @@ func (m *Labels) Reset() { *m = Labels{} } func (m *Labels) String() string { return proto.CompactTextString(m) } func (*Labels) ProtoMessage() {} func (*Labels) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} + return fileDescriptor_d938547f84707355, []int{5} } func (m *Labels) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -425,7 +503,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{6} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -492,7 +570,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} } func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (*ReadHints) ProtoMessage() {} func (*ReadHints) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} + return fileDescriptor_d938547f84707355, []int{7} } func (m *ReadHints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -586,7 +664,7 @@ func (m *Chunk) Reset() { *m = Chunk{} } func (m *Chunk) String() string { return proto.CompactTextString(m) } func (*Chunk) ProtoMessage() {} func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{7} + return fileDescriptor_d938547f84707355, []int{8} } func (m *Chunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -658,7 +736,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{8} + return fileDescriptor_d938547f84707355, []int{9} } func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -707,6 +785,7 @@ func 
init() { proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") proto.RegisterType((*Sample)(nil), "prometheus.Sample") + proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Labels)(nil), "prometheus.Labels") @@ -719,51 +798,53 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 690 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x6e, 0xda, 0x40, - 0x10, 0xce, 0xfa, 0x17, 0x86, 0x04, 0x39, 0xab, 0x54, 0x75, 0xa3, 0x96, 0x22, 0x4b, 0x95, 0x38, - 0x54, 0x44, 0x49, 0x4f, 0x91, 0x7a, 0x21, 0x91, 0xf3, 0xa3, 0xc6, 0xa0, 0x2c, 0xa0, 0xfe, 0x5c, - 0xd0, 0x02, 0x1b, 0xb0, 0x8a, 0x8d, 0xe3, 0x5d, 0xaa, 0xf0, 0x20, 0xbd, 0xf5, 0x15, 0x7a, 0xe8, - 0x5b, 0xe4, 0xd8, 0x27, 0xa8, 0xaa, 0x3c, 0x49, 0xb5, 0x6b, 0x13, 0x13, 0xa5, 0x97, 0xf6, 0x36, - 0xf3, 0x7d, 0xdf, 0xfc, 0xec, 0xcc, 0xd8, 0x50, 0x11, 0xcb, 0x84, 0xf1, 0x66, 0x92, 0xce, 0xc5, - 0x1c, 0x43, 0x92, 0xce, 0x23, 0x26, 0xa6, 0x6c, 0xc1, 0x77, 0x77, 0x26, 0xf3, 0xc9, 0x5c, 0xc1, - 0x7b, 0xd2, 0xca, 0x14, 0xde, 0x37, 0x0d, 0xaa, 0x01, 0x13, 0x69, 0x38, 0x0a, 0x98, 0xa0, 0x63, - 0x2a, 0x28, 0x3e, 0x04, 0x43, 0xe6, 0x70, 0x51, 0x1d, 0x35, 0xaa, 0x07, 0xaf, 0x9a, 0x45, 0x8e, - 0xe6, 0x43, 0x65, 0xee, 0xf6, 0x96, 0x09, 0x23, 0x2a, 0x04, 0xbf, 0x06, 0x1c, 0x29, 0x6c, 0x70, - 0x45, 0xa3, 0x70, 0xb6, 0x1c, 0xc4, 0x34, 0x62, 0xae, 0x56, 0x47, 0x8d, 0x32, 0x71, 0x32, 0xe6, - 0x44, 0x11, 0x6d, 0x1a, 0x31, 0x8c, 0xc1, 0x98, 0xb2, 0x59, 0xe2, 0x1a, 0x8a, 0x57, 0xb6, 0xc4, - 0x16, 0x71, 0x28, 0x5c, 0x33, 0xc3, 0xa4, 0xed, 0x2d, 0x01, 0x8a, 0x4a, 0xb8, 0x02, 0x76, 0xbf, - 0xfd, 0xae, 0xdd, 0x79, 0xdf, 0x76, 0x36, 0xa4, 0x73, 0xdc, 0xe9, 0xb7, 0x7b, 0x3e, 0x71, 0x10, - 0x2e, 0x83, 0x79, 0xda, 0xea, 0x9f, 0xfa, 0x8e, 0x86, 0xb7, 0xa0, 0x7c, 0x76, 0xde, 0xed, 0x75, - 0x4e, 0x49, 0x2b, 0x70, 0x74, 0x8c, 0xa1, 0xaa, 0x98, 0x02, 0x33, 0x64, 0x68, 0xb7, 0x1f, 0x04, - 0x2d, 0xf2, 0xd1, 0x31, 0x71, 0x09, 0x8c, 0xf3, 0xf6, 0x49, 0xc7, 0xb1, 0xf0, 0x26, 0x94, 0xba, - 0xbd, 0x56, 0xcf, 0xef, 0xfa, 0x3d, 0xc7, 0xf6, 0xde, 0x82, 0xd5, 0xa5, 0x51, 0x32, 0x63, 0x78, - 0x07, 0xcc, 0x2f, 0x74, 0xb6, 0xc8, 0xc6, 0x82, 0x48, 0xe6, 0xe0, 0xe7, 0x50, 0x16, 0x61, 0xc4, - 0xb8, 0xa0, 0x51, 0xa2, 0xde, 0xa9, 0x93, 0x02, 0xf0, 0xae, 0x01, 0x7a, 0x61, 0xc4, 0xba, 0x2c, - 0x0d, 0x19, 0xc7, 0x7b, 0x60, 0xcd, 0xe8, 0x90, 0xcd, 0xb8, 0x8b, 0xea, 0x7a, 0xa3, 0x72, 0xb0, - 0xbd, 0x3e, 0xd9, 0x0b, 0xc9, 0x1c, 0x19, 0xb7, 0xbf, 0x5e, 0x6e, 0x90, 0x5c, 0x86, 0x0f, 0xc0, - 0xe6, 0xaa, 0x38, 0x77, 0x35, 0x15, 0x81, 0xd7, 0x23, 0xb2, 0xbe, 0xf2, 0x90, 0x95, 0xd0, 0xdb, - 0x07, 0x53, 0xa5, 0x92, 0x83, 0x54, 0xc3, 0x47, 0xd9, 0x20, 0xa5, 0x5d, 0xbc, 0x21, 0xdb, 0x48, - 0xe6, 0x78, 0x87, 0x60, 0x5d, 0x64, 0x05, 0xff, 0xb5, 0x43, 0xef, 0x2b, 0x82, 0x4d, 0x85, 0x07, - 0x54, 0x8c, 0xa6, 0x2c, 0xc5, 0xfb, 0x0f, 0x6e, 0xe7, 0xc5, 0xa3, 0xf8, 0x5c, 0xd7, 0x5c, 0xbb, - 0x99, 0x55, 0xa3, 0xda, 0xdf, 0x1a, 0xd5, 0xd7, 0x1b, 0x6d, 0x80, 0xa1, 0x2e, 0xc0, 0x02, 0xcd, - 0xbf, 0x74, 0x36, 0xb0, 0x0d, 0x7a, 0xdb, 0xbf, 0x74, 0x90, 0x04, 0x88, 0xdc, 0xba, 0x04, 0x88, - 0xef, 0xe8, 0xde, 0x0f, 0x04, 0x65, 0xc2, 0xe8, 0xf8, 0x2c, 0x8c, 0x05, 0xc7, 0x4f, 0xc1, 0xe6, - 0x82, 0x25, 0x83, 0x88, 0xab, 
0xbe, 0x74, 0x62, 0x49, 0x37, 0xe0, 0xb2, 0xf4, 0xd5, 0x22, 0x1e, - 0xad, 0x4a, 0x4b, 0x1b, 0x3f, 0x83, 0x12, 0x17, 0x34, 0x15, 0x52, 0xad, 0x2b, 0xb5, 0xad, 0xfc, - 0x80, 0xe3, 0x27, 0x60, 0xb1, 0x78, 0x2c, 0x09, 0x43, 0x11, 0x26, 0x8b, 0xc7, 0x01, 0xc7, 0xbb, - 0x50, 0x9a, 0xa4, 0xf3, 0x45, 0x12, 0xc6, 0x13, 0xd7, 0xac, 0xeb, 0x8d, 0x32, 0xb9, 0xf7, 0x71, - 0x15, 0xb4, 0xe1, 0xd2, 0xb5, 0xea, 0xa8, 0x51, 0x22, 0xda, 0x70, 0x29, 0xb3, 0xa7, 0x34, 0x9e, - 0x30, 0x99, 0xc4, 0xce, 0xb2, 0x2b, 0x3f, 0xe0, 0xde, 0x77, 0x04, 0xe6, 0xf1, 0x74, 0x11, 0x7f, - 0xc6, 0x35, 0xa8, 0x44, 0x61, 0x3c, 0x90, 0x77, 0x54, 0xf4, 0x5c, 0x8e, 0xc2, 0x58, 0x1e, 0x53, - 0xc0, 0x15, 0x4f, 0x6f, 0xee, 0xf9, 0xfc, 0xec, 0x22, 0x7a, 0x93, 0xf3, 0xcd, 0x7c, 0x09, 0xba, - 0x5a, 0xc2, 0xee, 0xfa, 0x12, 0x54, 0x81, 0xa6, 0x1f, 0x8f, 0xe6, 0xe3, 0x30, 0x9e, 0x14, 0x1b, - 0x90, 0x9f, 0xb3, 0x7a, 0xd5, 0x26, 0x51, 0xb6, 0x57, 0x87, 0xd2, 0x4a, 0xf5, 0xf0, 0x8b, 0xb3, - 0x41, 0xff, 0xd0, 0x21, 0x0e, 0xf2, 0xae, 0x61, 0x4b, 0x65, 0x63, 0xe3, 0xff, 0xbd, 0xef, 0x3d, - 0xb0, 0x46, 0x32, 0xc3, 0xea, 0xbc, 0xb7, 0x1f, 0x75, 0xba, 0x0a, 0xc8, 0x64, 0x47, 0x3b, 0xb7, - 0x77, 0x35, 0xf4, 0xf3, 0xae, 0x86, 0x7e, 0xdf, 0xd5, 0xd0, 0x27, 0x4b, 0xaa, 0x93, 0xe1, 0xd0, - 0x52, 0x7f, 0xb2, 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0xb7, 0x12, 0x44, 0xfa, 0x04, - 0x00, 0x00, + // 734 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xdb, 0x46, + 0x14, 0xf5, 0xf0, 0x29, 0x5e, 0xd9, 0x02, 0x3d, 0x50, 0x51, 0xd6, 0x68, 0x55, 0x81, 0x40, 0x01, + 0x2d, 0x0a, 0x19, 0x76, 0x37, 0x35, 0xd0, 0x8d, 0x6c, 0xd0, 0x0f, 0xd4, 0x94, 0xe0, 0x91, 0x84, + 0x3e, 0x36, 0xc2, 0x48, 0x1a, 0x4b, 0x44, 0xc4, 0x47, 0x38, 0x54, 0x60, 0x7d, 0x48, 0x76, 0xf9, + 0x83, 0x20, 0x8b, 0xfc, 0x85, 0x97, 0xf9, 0x82, 0x20, 0xf0, 0x97, 0x04, 0x33, 0xa4, 0x4c, 0x29, + 0x4e, 0x16, 0xce, 0xee, 0xde, 0x7b, 0xce, 0xb9, 0x8f, 0xb9, 0x97, 0x84, 0x6a, 0xb6, 0x4a, 0x18, + 0x6f, 0x27, 0x69, 0x9c, 0xc5, 0x18, 0x92, 0x34, 0x0e, 0x59, 0x36, 0x67, 0x4b, 0x7e, 0x50, 0x9f, + 0xc5, 0xb3, 0x58, 0x86, 0x0f, 0x85, 0x95, 0x33, 0xdc, 0x37, 0x0a, 0xd4, 0x7c, 0x96, 0xa5, 0xc1, + 0xc4, 0x67, 0x19, 0x9d, 0xd2, 0x8c, 0xe2, 0x13, 0xd0, 0x44, 0x0e, 0x07, 0x35, 0x51, 0xab, 0x76, + 0xfc, 0x5b, 0xbb, 0xcc, 0xd1, 0xde, 0x66, 0x16, 0xee, 0x60, 0x95, 0x30, 0x22, 0x25, 0xf8, 0x77, + 0xc0, 0xa1, 0x8c, 0x8d, 0x6e, 0x69, 0x18, 0x2c, 0x56, 0xa3, 0x88, 0x86, 0xcc, 0x51, 0x9a, 0xa8, + 0x65, 0x11, 0x3b, 0x47, 0xce, 0x25, 0xd0, 0xa5, 0x21, 0xc3, 0x18, 0xb4, 0x39, 0x5b, 0x24, 0x8e, + 0x26, 0x71, 0x69, 0x8b, 0xd8, 0x32, 0x0a, 0x32, 0x47, 0xcf, 0x63, 0xc2, 0x76, 0x57, 0x00, 0x65, + 0x25, 0x5c, 0x05, 0x73, 0xd8, 0xfd, 0xbb, 0xdb, 0xfb, 0xa7, 0x6b, 0xef, 0x08, 0xe7, 0xac, 0x37, + 0xec, 0x0e, 0x3c, 0x62, 0x23, 0x6c, 0x81, 0x7e, 0xd1, 0x19, 0x5e, 0x78, 0xb6, 0x82, 0xf7, 0xc0, + 0xba, 0xbc, 0xea, 0x0f, 0x7a, 0x17, 0xa4, 0xe3, 0xdb, 0x2a, 0xc6, 0x50, 0x93, 0x48, 0x19, 0xd3, + 0x84, 0xb4, 0x3f, 0xf4, 0xfd, 0x0e, 0xf9, 0xcf, 0xd6, 0x71, 0x05, 0xb4, 0xab, 0xee, 0x79, 0xcf, + 0x36, 0xf0, 0x2e, 0x54, 0xfa, 0x83, 0xce, 0xc0, 0xeb, 0x7b, 0x03, 0xdb, 0x74, 0xff, 0x02, 0xa3, + 0x4f, 0xc3, 0x64, 0xc1, 0x70, 0x1d, 0xf4, 0x57, 0x74, 0xb1, 0xcc, 0x9f, 0x05, 0x91, 0xdc, 0xc1, + 0x3f, 0x83, 0x95, 0x05, 0x21, 0xe3, 0x19, 0x0d, 0x13, 0x39, 0xa7, 0x4a, 0xca, 0x80, 0x1b, 0x43, + 0xc5, 0xbb, 0x63, 0x61, 0xb2, 0xa0, 0x29, 0x3e, 0x04, 0x63, 0x41, 0xc7, 0x6c, 0xc1, 0x1d, 0xd4, + 0x54, 0x5b, 0xd5, 0xe3, 0xfd, 0xcd, 0x77, 0xbd, 0x16, 0xc8, 0xa9, 0x76, 0xff, 0xf1, 0xd7, 
0x1d, + 0x52, 0xd0, 0xca, 0x82, 0xca, 0x37, 0x0b, 0xaa, 0x5f, 0x16, 0x7c, 0x8b, 0x00, 0x06, 0x41, 0xc8, + 0xfa, 0x2c, 0x0d, 0x18, 0x7f, 0x7e, 0xcd, 0x63, 0x30, 0xb9, 0x1c, 0x97, 0x3b, 0x8a, 0x54, 0xe0, + 0x4d, 0x45, 0xfe, 0x12, 0x85, 0x64, 0x4d, 0xc4, 0x7f, 0x82, 0xc5, 0x8a, 0x21, 0xb9, 0xa3, 0x4a, + 0x55, 0x7d, 0x53, 0xb5, 0x7e, 0x81, 0x42, 0x57, 0x92, 0xdd, 0x23, 0xd0, 0x65, 0x13, 0x62, 0xe9, + 0xf2, 0x50, 0x50, 0xbe, 0x74, 0x61, 0x6f, 0x8f, 0x6f, 0x15, 0xe3, 0xbb, 0x27, 0x60, 0x5c, 0xe7, + 0xad, 0x3e, 0x77, 0x36, 0xf7, 0x35, 0x82, 0x5d, 0x19, 0xf7, 0x69, 0x36, 0x99, 0xb3, 0x14, 0x1f, + 0x6d, 0xdd, 0xf9, 0x2f, 0x4f, 0xf4, 0x05, 0xaf, 0xbd, 0x71, 0xdf, 0xeb, 0x46, 0x95, 0xaf, 0x35, + 0xaa, 0x6e, 0x36, 0xda, 0x02, 0x4d, 0x5e, 0xab, 0x01, 0x8a, 0x77, 0x63, 0xef, 0x60, 0x13, 0xd4, + 0xae, 0x77, 0x63, 0x23, 0x11, 0x20, 0xe2, 0x42, 0x45, 0x80, 0x78, 0xb6, 0xea, 0xbe, 0x47, 0x60, + 0x11, 0x46, 0xa7, 0x97, 0x41, 0x94, 0x71, 0xfc, 0x23, 0x98, 0x3c, 0x63, 0xc9, 0x28, 0xe4, 0xb2, + 0x2f, 0x95, 0x18, 0xc2, 0xf5, 0xb9, 0x28, 0x7d, 0xbb, 0x8c, 0x26, 0xeb, 0xd2, 0xc2, 0xc6, 0x3f, + 0x41, 0x85, 0x67, 0x34, 0xcd, 0x04, 0x3b, 0xbf, 0x05, 0x53, 0xfa, 0x3e, 0xc7, 0x3f, 0x80, 0xc1, + 0xa2, 0xa9, 0x00, 0x34, 0x09, 0xe8, 0x2c, 0x9a, 0xfa, 0x1c, 0x1f, 0x40, 0x65, 0x96, 0xc6, 0xcb, + 0x24, 0x88, 0x66, 0x8e, 0xde, 0x54, 0x5b, 0x16, 0x79, 0xf4, 0x71, 0x0d, 0x94, 0xf1, 0xca, 0x31, + 0x9a, 0xa8, 0x55, 0x21, 0xca, 0x78, 0x25, 0xb2, 0xa7, 0x34, 0x9a, 0x31, 0x91, 0xc4, 0xcc, 0xb3, + 0x4b, 0xdf, 0xe7, 0xee, 0x3b, 0x04, 0xfa, 0xd9, 0x7c, 0x19, 0xbd, 0xc0, 0x0d, 0xa8, 0x86, 0x41, + 0x34, 0x12, 0x27, 0x58, 0xf6, 0x6c, 0x85, 0x41, 0x24, 0xce, 0xd0, 0xe7, 0x12, 0xa7, 0x77, 0x8f, + 0x78, 0xf1, 0x89, 0x84, 0xf4, 0xae, 0xc0, 0xdb, 0xc5, 0x12, 0x54, 0xb9, 0x84, 0x83, 0xcd, 0x25, + 0xc8, 0x02, 0x6d, 0x2f, 0x9a, 0xc4, 0xd3, 0x20, 0x9a, 0x95, 0x1b, 0x10, 0xbf, 0x1e, 0x39, 0xd5, + 0x2e, 0x91, 0xb6, 0xdb, 0x84, 0xca, 0x9a, 0xb5, 0xfd, 0x77, 0x30, 0x41, 0xfd, 0xb7, 0x47, 0x6c, + 0xe4, 0xbe, 0x84, 0x3d, 0x99, 0x8d, 0x4d, 0xbf, 0xf7, 0xcb, 0x38, 0x04, 0x63, 0x22, 0x32, 0xac, + 0x3f, 0x8c, 0xfd, 0x27, 0x9d, 0xae, 0x05, 0x39, 0xed, 0xb4, 0x7e, 0xff, 0xd0, 0x40, 0x1f, 0x1e, + 0x1a, 0xe8, 0xd3, 0x43, 0x03, 0xfd, 0x6f, 0x08, 0x76, 0x32, 0x1e, 0x1b, 0xf2, 0xaf, 0xfb, 0xc7, + 0xe7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xa3, 0x6c, 0x23, 0xa6, 0x05, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -857,6 +938,58 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *TimeSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -881,6 +1014,20 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.Samples) > 0 { for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { { @@ -1273,6 +1420,30 @@ func (m *Sample) Size() (n int) { return n } +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *TimeSeries) Size() (n int) { if m == nil { return 0 @@ -1291,6 +1462,12 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1697,6 +1874,121 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } return nil } +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TimeSeries) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1794,6 +2086,40 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/prompb/types.proto b/prompb/types.proto index 259a0d40d..ee11f3a00 100644 --- a/prompb/types.proto +++ b/prompb/types.proto @@ -40,13 +40,27 @@ message MetricMetadata { message Sample { double value = 1; + // timestamp is in ms format, see pkg/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. int64 timestamp = 2; } +message Exemplar { + // Optional, can be empty. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + double value = 2; + // timestamp is in ms format, see pkg/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 3; +} + // TimeSeries represents samples and labels for a single time series. message TimeSeries { + // For a timeseries to be valid, and for the samples and exemplars + // to be ingested by the remote system properly, the labels field is required. repeated Label labels = 1 [(gogoproto.nullable) = false]; repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false]; } message Label { diff --git a/promql/test.go b/promql/test.go index a60b40890..acb1eb5ca 100644 --- a/promql/test.go +++ b/promql/test.go @@ -26,6 +26,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/promql/parser" @@ -274,16 +275,18 @@ func (*evalCmd) testCmd() {} // loadCmd is a command that loads sequences of sample values for specific // metrics into the storage. 
type loadCmd struct { - gap time.Duration - metrics map[uint64]labels.Labels - defs map[uint64][]Point + gap time.Duration + metrics map[uint64]labels.Labels + defs map[uint64][]Point + exemplars map[uint64][]exemplar.Exemplar } func newLoadCmd(gap time.Duration) *loadCmd { return &loadCmd{ - gap: gap, - metrics: map[uint64]labels.Labels{}, - defs: map[uint64][]Point{}, + gap: gap, + metrics: map[uint64]labels.Labels{}, + defs: map[uint64][]Point{}, + exemplars: map[uint64][]exemplar.Exemplar{}, } } diff --git a/scrape/scrape.go b/scrape/scrape.go index 0985d2f46..20600a1e0 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -1497,7 +1497,6 @@ func yoloString(b []byte) string { // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample limit errors. - func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { switch errors.Cause(err) { case nil: diff --git a/scripts/genproto.sh b/scripts/genproto.sh index 9683b759b..1c152d020 100755 --- a/scripts/genproto.sh +++ b/scripts/genproto.sh @@ -10,8 +10,8 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then exit 255 fi -if ! [[ $(protoc --version) =~ "3.12.3" ]]; then - echo "could not find protoc 3.12.3, is it installed + in PATH?" +if ! [[ $(protoc --version) =~ "3.15.8" ]]; then + echo "could not find protoc 3.15.8, is it installed + in PATH?" exit 255 fi diff --git a/storage/interface.go b/storage/interface.go index 5ddc300b3..6f65cf069 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -16,6 +16,7 @@ package storage import ( "context" "errors" + "fmt" "github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/labels" @@ -30,6 +31,8 @@ var ( ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") ErrOutOfBounds = errors.New("out of bounds") ErrOutOfOrderExemplar = errors.New("out of order exemplar") + ErrDuplicateExemplar = errors.New("duplicate exemplar") + ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) ) // Appendable allows creating appenders. 
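For context on how the ErrExemplarLabelLength value just added above fits together with the ExemplarMaxLabelSetLength constant added earlier in pkg/exemplar: the call site that enforces the limit is outside this excerpt. A minimal, self-contained sketch of what such a check might look like follows; the validateExemplarLabels helper and the simplified label type are assumptions for illustration, not code from this patch.

package main

import (
	"fmt"
	"unicode/utf8"
)

// Mirrors exemplar.ExemplarMaxLabelSetLength: per the OpenMetrics spec, the
// combined length of an exemplar's label names and values must not exceed
// 128 UTF-8 characters.
const exemplarMaxLabelSetLength = 128

// label is a simplified stand-in for labels.Label.
type label struct{ name, value string }

var errExemplarLabelLength = fmt.Errorf(
	"label length for exemplar exceeds maximum of %d UTF-8 characters", exemplarMaxLabelSetLength)

// validateExemplarLabels counts runes rather than bytes, so a multi-byte
// UTF-8 character counts once, matching the spec's wording.
func validateExemplarLabels(lbls []label) error {
	total := 0
	for _, l := range lbls {
		total += utf8.RuneCountInString(l.name) + utf8.RuneCountInString(l.value)
	}
	if total > exemplarMaxLabelSetLength {
		return errExemplarLabelLength
	}
	return nil
}

func main() {
	fmt.Println(validateExemplarLabels([]label{{"traceID", "120c1d2a588064c3"}})) // <nil>
}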
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index a3eb35e46..1dea309c3 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -52,25 +52,30 @@ const ( type queueManagerMetrics struct { reg prometheus.Registerer - samplesTotal prometheus.Counter - metadataTotal prometheus.Counter - failedSamplesTotal prometheus.Counter - failedMetadataTotal prometheus.Counter - retriedSamplesTotal prometheus.Counter - retriedMetadataTotal prometheus.Counter - droppedSamplesTotal prometheus.Counter - enqueueRetriesTotal prometheus.Counter - sentBatchDuration prometheus.Histogram - highestSentTimestamp *maxTimestamp - pendingSamples prometheus.Gauge - shardCapacity prometheus.Gauge - numShards prometheus.Gauge - maxNumShards prometheus.Gauge - minNumShards prometheus.Gauge - desiredNumShards prometheus.Gauge - samplesBytesTotal prometheus.Counter - metadataBytesTotal prometheus.Counter - maxSamplesPerSend prometheus.Gauge + samplesTotal prometheus.Counter + exemplarsTotal prometheus.Counter + metadataTotal prometheus.Counter + failedSamplesTotal prometheus.Counter + failedExemplarsTotal prometheus.Counter + failedMetadataTotal prometheus.Counter + retriedSamplesTotal prometheus.Counter + retriedExemplarsTotal prometheus.Counter + retriedMetadataTotal prometheus.Counter + droppedSamplesTotal prometheus.Counter + droppedExemplarsTotal prometheus.Counter + enqueueRetriesTotal prometheus.Counter + sentBatchDuration prometheus.Histogram + highestSentTimestamp *maxTimestamp + pendingSamples prometheus.Gauge + pendingExemplars prometheus.Gauge + shardCapacity prometheus.Gauge + numShards prometheus.Gauge + maxNumShards prometheus.Gauge + minNumShards prometheus.Gauge + desiredNumShards prometheus.Gauge + sentBytesTotal prometheus.Counter + metadataBytesTotal prometheus.Counter + maxSamplesPerSend prometheus.Gauge } func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics { @@ -89,6 +94,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of samples sent to remote storage.", ConstLabels: constLabels, }) + m.exemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_total", + Help: "Total number of exemplars sent to remote storage.", + ConstLabels: constLabels, + }) m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -103,6 +115,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.", ConstLabels: constLabels, }) + m.failedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_failed_total", + Help: "Total number of exemplars which failed on send to remote storage, non-recoverable errors.", + ConstLabels: constLabels, + }) m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, @@ -117,6 +136,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.", ConstLabels: constLabels, }) + m.retriedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_retried_total", 
+		Help:        "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.",
+		ConstLabels: constLabels,
+	})
 	m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace:   namespace,
 		Subsystem:   subsystem,
@@ -128,7 +154,14 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
 		Namespace:   namespace,
 		Subsystem:   subsystem,
 		Name:        "samples_dropped_total",
-		Help:        "Total number of samples which were dropped after being read from the WAL before being sent via remote write.",
+		Help:        "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
+		ConstLabels: constLabels,
+	})
+	m.droppedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "exemplars_dropped_total",
+		Help:        "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
 		ConstLabels: constLabels,
 	})
 	m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
@@ -162,6 +195,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
 		Help:        "The number of samples pending in the queues shards to be sent to the remote storage.",
 		ConstLabels: constLabels,
 	})
+	m.pendingExemplars = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace:   namespace,
+		Subsystem:   subsystem,
+		Name:        "exemplars_pending",
+		Help:        "The number of exemplars pending in the queues shards to be sent to the remote storage.",
+		ConstLabels: constLabels,
+	})
 	m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{
 		Namespace:   namespace,
 		Subsystem:   subsystem,
@@ -197,11 +237,11 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
 		Help:        "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. samples out.",
 		ConstLabels: constLabels,
 	})
-	m.samplesBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.sentBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace:   namespace,
 		Subsystem:   subsystem,
-		Name:        "samples_bytes_total",
-		Help:        "The total number of bytes of samples sent by the queue after compression.",
+		Name:        "bytes_total",
+		Help:        "The total number of bytes of data (not metadata) sent by the queue after compression. Note that, when sending exemplars over remote write is enabled, the exemplars included in a remote write request count towards this metric.",
 		ConstLabels: constLabels,
 	})
 	m.metadataBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{
@@ -215,7 +255,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
 		Namespace:   namespace,
 		Subsystem:   subsystem,
 		Name:        "max_samples_per_send",
-		Help:        "The maximum number of samples to be sent, in a single request, to the remote storage.",
+		Help:        "The maximum number of samples to be sent, in a single request, to the remote storage. Note that, when sending exemplars over remote write is enabled, exemplars count towards this limit.",
 		ConstLabels: constLabels,
 	})

@@ -226,22 +266,27 @@ func (m *queueManagerMetrics) register() {
 	if m.reg != nil {
 		m.reg.MustRegister(
 			m.samplesTotal,
+			m.exemplarsTotal,
 			m.metadataTotal,
 			m.failedSamplesTotal,
+			m.failedExemplarsTotal,
 			m.failedMetadataTotal,
 			m.retriedSamplesTotal,
+			m.retriedExemplarsTotal,
 			m.retriedMetadataTotal,
 			m.droppedSamplesTotal,
+			m.droppedExemplarsTotal,
 			m.enqueueRetriesTotal,
 			m.sentBatchDuration,
 			m.highestSentTimestamp,
 			m.pendingSamples,
+			m.pendingExemplars,
 			m.shardCapacity,
 			m.numShards,
 			m.maxNumShards,
 			m.minNumShards,
 			m.desiredNumShards,
-			m.samplesBytesTotal,
+			m.sentBytesTotal,
 			m.metadataBytesTotal,
 			m.maxSamplesPerSend,
 		)
@@ -251,22 +296,27 @@ func (m *queueManagerMetrics) register() {
 func (m *queueManagerMetrics) unregister() {
 	if m.reg != nil {
 		m.reg.Unregister(m.samplesTotal)
+		m.reg.Unregister(m.exemplarsTotal)
 		m.reg.Unregister(m.metadataTotal)
 		m.reg.Unregister(m.failedSamplesTotal)
+		m.reg.Unregister(m.failedExemplarsTotal)
 		m.reg.Unregister(m.failedMetadataTotal)
 		m.reg.Unregister(m.retriedSamplesTotal)
+		m.reg.Unregister(m.retriedExemplarsTotal)
 		m.reg.Unregister(m.retriedMetadataTotal)
 		m.reg.Unregister(m.droppedSamplesTotal)
+		m.reg.Unregister(m.droppedExemplarsTotal)
 		m.reg.Unregister(m.enqueueRetriesTotal)
 		m.reg.Unregister(m.sentBatchDuration)
 		m.reg.Unregister(m.highestSentTimestamp)
 		m.reg.Unregister(m.pendingSamples)
+		m.reg.Unregister(m.pendingExemplars)
 		m.reg.Unregister(m.shardCapacity)
 		m.reg.Unregister(m.numShards)
 		m.reg.Unregister(m.maxNumShards)
 		m.reg.Unregister(m.minNumShards)
 		m.reg.Unregister(m.desiredNumShards)
-		m.reg.Unregister(m.samplesBytesTotal)
+		m.reg.Unregister(m.sentBytesTotal)
 		m.reg.Unregister(m.metadataBytesTotal)
 		m.reg.Unregister(m.maxSamplesPerSend)
 	}
@@ -295,6 +345,7 @@ type QueueManager struct {
 	mcfg           config.MetadataConfig
 	externalLabels labels.Labels
 	relabelConfigs []*relabel.Config
+	sendExemplars  bool
 	watcher         *wal.Watcher
 	metadataWatcher *MetadataWatcher
@@ -312,7 +363,7 @@ type QueueManager struct {
 	quit chan struct{}
 	wg   sync.WaitGroup
-	samplesIn, samplesDropped, samplesOut, samplesOutDuration *ewmaRate
+	dataIn, dataDropped, dataOut, dataOutDuration *ewmaRate
 	metrics  *queueManagerMetrics
 	interner *pool
@@ -336,6 +387,7 @@ func NewQueueManager(
 	interner *pool,
 	highestRecvTimestamp *maxTimestamp,
 	sm ReadyScrapeManager,
+	enableExemplarRemoteWrite bool,
 ) *QueueManager {
 	if logger == nil {
 		logger = log.NewNopLogger()
@@ -350,6 +402,7 @@ func NewQueueManager(
 		externalLabels: externalLabels,
 		relabelConfigs: relabelConfigs,
 		storeClient:    client,
+		sendExemplars:  enableExemplarRemoteWrite,
 		seriesLabels:         make(map[uint64]labels.Labels),
 		seriesSegmentIndexes: make(map[uint64]int),
@@ -359,17 +412,17 @@ func NewQueueManager(
 		reshardChan: make(chan int),
 		quit:        make(chan struct{}),
-		samplesIn:          samplesIn,
-		samplesDropped:     newEWMARate(ewmaWeight, shardUpdateDuration),
-		samplesOut:         newEWMARate(ewmaWeight, shardUpdateDuration),
-		samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
+		dataIn:          samplesIn,
+		dataDropped:     newEWMARate(ewmaWeight, shardUpdateDuration),
+		dataOut:         newEWMARate(ewmaWeight, shardUpdateDuration),
+		dataOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
 		metrics:              metrics,
 		interner:             interner,
 		highestRecvTimestamp: highestRecvTimestamp,
 	}
-	t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, walDir)
+	t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger,
client.Name(), t, walDir, enableExemplarRemoteWrite) if t.mcfg.Send { t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline) } @@ -444,13 +497,14 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p // Append queues a sample to be sent to the remote storage. Blocks until all samples are // enqueued on their shards or a shutdown signal is received. func (t *QueueManager) Append(samples []record.RefSample) bool { + var appendSample prompb.Sample outer: for _, s := range samples { t.seriesMtx.Lock() lbls, ok := t.seriesLabels[s.Ref] if !ok { t.metrics.droppedSamplesTotal.Inc() - t.samplesDropped.incr(1) + t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) } @@ -466,12 +520,56 @@ outer: return false default: } + appendSample.Value = s.V + appendSample.Timestamp = s.T + if t.shards.enqueue(s.Ref, writeSample{lbls, appendSample}) { + continue outer + } - if t.shards.enqueue(s.Ref, sample{ - labels: lbls, - t: s.T, - v: s.V, - }) { + t.metrics.enqueueRetriesTotal.Inc() + time.Sleep(time.Duration(backoff)) + backoff = backoff * 2 + if backoff > t.cfg.MaxBackoff { + backoff = t.cfg.MaxBackoff + } + } + } + return true +} + +func (t *QueueManager) AppendExemplars(exemplars []record.RefExemplar) bool { + if !t.sendExemplars { + return true + } + + var appendExemplar prompb.Exemplar +outer: + for _, e := range exemplars { + t.seriesMtx.Lock() + lbls, ok := t.seriesLabels[e.Ref] + if !ok { + t.metrics.droppedExemplarsTotal.Inc() + // Track dropped exemplars in the same EWMA for sharding calc. + t.dataDropped.incr(1) + if _, ok := t.droppedSeries[e.Ref]; !ok { + level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + } + t.seriesMtx.Unlock() + continue + } + t.seriesMtx.Unlock() + // This will only loop if the queues are being resharded. + backoff := t.cfg.MinBackoff + for { + select { + case <-t.quit: + return false + default: + } + appendExemplar.Labels = labelsToLabelsProto(e.Labels, nil) + appendExemplar.Timestamp = e.T + appendExemplar.Value = e.V + if t.shards.enqueue(e.Ref, writeExemplar{lbls, appendExemplar}) { continue outer } @@ -687,27 +785,27 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool { // outlined in this functions implementation. It is up to the caller to reshard, or not, // based on the return value. func (t *QueueManager) calculateDesiredShards() int { - t.samplesOut.tick() - t.samplesDropped.tick() - t.samplesOutDuration.tick() + t.dataOut.tick() + t.dataDropped.tick() + t.dataOutDuration.tick() // We use the number of incoming samples as a prediction of how much work we // will need to do next iteration. We add to this any pending samples // (received - send) so we can catch up with any backlog. We use the average // outgoing batch latency to work out how many shards we need. 
var ( - samplesInRate = t.samplesIn.rate() - samplesOutRate = t.samplesOut.rate() - samplesKeptRatio = samplesOutRate / (t.samplesDropped.rate() + samplesOutRate) - samplesOutDuration = t.samplesOutDuration.rate() / float64(time.Second) - samplesPendingRate = samplesInRate*samplesKeptRatio - samplesOutRate - highestSent = t.metrics.highestSentTimestamp.Get() - highestRecv = t.highestRecvTimestamp.Get() - delay = highestRecv - highestSent - samplesPending = delay * samplesInRate * samplesKeptRatio + dataInRate = t.dataIn.rate() + dataOutRate = t.dataOut.rate() + dataKeptRatio = dataOutRate / (t.dataDropped.rate() + dataOutRate) + dataOutDuration = t.dataOutDuration.rate() / float64(time.Second) + dataPendingRate = dataInRate*dataKeptRatio - dataOutRate + highestSent = t.metrics.highestSentTimestamp.Get() + highestRecv = t.highestRecvTimestamp.Get() + delay = highestRecv - highestSent + dataPending = delay * dataInRate * dataKeptRatio ) - if samplesOutRate <= 0 { + if dataOutRate <= 0 { return t.numShards } @@ -717,17 +815,17 @@ func (t *QueueManager) calculateDesiredShards() int { const integralGain = 0.1 / float64(shardUpdateDuration/time.Second) var ( - timePerSample = samplesOutDuration / samplesOutRate - desiredShards = timePerSample * (samplesInRate*samplesKeptRatio + integralGain*samplesPending) + timePerSample = dataOutDuration / dataOutRate + desiredShards = timePerSample * (dataInRate*dataKeptRatio + integralGain*dataPending) ) t.metrics.desiredNumShards.Set(desiredShards) level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", - "samplesInRate", samplesInRate, - "samplesOutRate", samplesOutRate, - "samplesKeptRatio", samplesKeptRatio, - "samplesPendingRate", samplesPendingRate, - "samplesPending", samplesPending, - "samplesOutDuration", samplesOutDuration, + "dataInRate", dataInRate, + "dataOutRate", dataOutRate, + "dataKeptRatio", dataKeptRatio, + "dataPendingRate", dataPendingRate, + "dataPending", dataPending, + "dataOutDuration", dataOutDuration, "timePerSample", timePerSample, "desiredShards", desiredShards, "highestSent", highestSent, @@ -785,17 +883,24 @@ func (t *QueueManager) newShards() *shards { return s } -type sample struct { - labels labels.Labels - t int64 - v float64 +type writeSample struct { + seriesLabels labels.Labels + sample prompb.Sample +} + +type writeExemplar struct { + seriesLabels labels.Labels + exemplar prompb.Exemplar } type shards struct { mtx sync.RWMutex // With the WAL, this is never actually contended. qm *QueueManager - queues []chan sample + queues []chan interface{} + // So we can accurately track how many of each are lost during shard shutdowns. + enqueuedSamples atomic.Int64 + enqueuedExemplars atomic.Int64 // Emulate a wait group with a channel and an atomic int, as you // cannot select on a wait group. @@ -807,8 +912,9 @@ type shards struct { // Hard shutdown context is used to terminate outgoing HTTP connections // after giving them a chance to terminate. - hardShutdown context.CancelFunc - droppedOnHardShutdown atomic.Uint32 + hardShutdown context.CancelFunc + samplesDroppedOnHardShutdown atomic.Uint32 + exemplarsDroppedOnHardShutdown atomic.Uint32 } // start the shards; must be called before any call to enqueue. 
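Before the start/stop hunks below, one note on the sharding math: the calculateDesiredShards changes above are a pure rename from samples* to data* (exemplars now flow through the same EWMA rates), so the formula itself is unchanged: the estimated send time per item, multiplied by the predicted inbound rate plus an integral term that drains the backlog implied by the send delay. A condensed, runnable sketch of that calculation, where every name and number is invented for illustration rather than taken from this patch:

package main

import "fmt"

// desiredShards condenses the arithmetic of calculateDesiredShards: seconds
// of send time per item, times (predicted inbound rate + integralGain * backlog).
func desiredShards(dataInRate, dataOutRate, dataDroppedRate, dataOutDuration, delaySeconds, integralGain float64) float64 {
	dataKeptRatio := dataOutRate / (dataDroppedRate + dataOutRate)
	timePerDatum := dataOutDuration / dataOutRate // shard-seconds spent per item sent
	dataPending := delaySeconds * dataInRate * dataKeptRatio
	return timePerDatum * (dataInRate*dataKeptRatio + integralGain*dataPending)
}

func main() {
	// 80k items/s in, 75k/s out, 1k/s dropped, 5 shard-seconds of send time per
	// wall-clock second, 10s behind, gain 0.02: prints roughly "6.3 shards".
	fmt.Printf("%.1f shards\n", desiredShards(80000, 75000, 1000, 5, 10, 0.02))
}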
@@ -819,9 +925,9 @@ func (s *shards) start(n int) { s.qm.metrics.pendingSamples.Set(0) s.qm.metrics.numShards.Set(float64(n)) - newQueues := make([]chan sample, n) + newQueues := make([]chan interface{}, n) for i := 0; i < n; i++ { - newQueues[i] = make(chan sample, s.qm.cfg.Capacity) + newQueues[i] = make(chan interface{}, s.qm.cfg.Capacity) } s.queues = newQueues @@ -831,7 +937,8 @@ func (s *shards) start(n int) { s.softShutdown = make(chan struct{}) s.running.Store(int32(n)) s.done = make(chan struct{}) - s.droppedOnHardShutdown.Store(0) + s.samplesDroppedOnHardShutdown.Store(0) + s.exemplarsDroppedOnHardShutdown.Store(0) for i := 0; i < n; i++ { go s.runShard(hardShutdownCtx, i, newQueues[i]) } @@ -864,14 +971,17 @@ func (s *shards) stop() { // Force an unclean shutdown. s.hardShutdown() <-s.done - if dropped := s.droppedOnHardShutdown.Load(); dropped > 0 { + if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 { level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped) } + if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 { + level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped) + } } -// enqueue a sample. If we are currently in the process of shutting down or resharding, +// enqueue data (sample or exemplar). If we are currently in the process of shutting down or resharding, // will return false; in this case, you should back off and retry. -func (s *shards) enqueue(ref uint64, sample sample) bool { +func (s *shards) enqueue(ref uint64, data interface{}) bool { s.mtx.RLock() defer s.mtx.RUnlock() @@ -885,13 +995,22 @@ func (s *shards) enqueue(ref uint64, sample sample) bool { select { case <-s.softShutdown: return false - case s.queues[shard] <- sample: - s.qm.metrics.pendingSamples.Inc() + case s.queues[shard] <- data: + switch data.(type) { + case writeSample: + s.qm.metrics.pendingSamples.Inc() + s.enqueuedSamples.Inc() + case writeExemplar: + s.qm.metrics.pendingExemplars.Inc() + s.enqueuedExemplars.Inc() + default: + level.Warn(s.qm.logger).Log("msg", "Invalid object type in shards enqueue") + } return true } } -func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { +func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface{}) { defer func() { if s.running.Dec() == 0 { close(s.done) @@ -901,14 +1020,26 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { shardNum := strconv.Itoa(shardID) // Send batches of at most MaxSamplesPerSend samples to the remote storage. - // If we have fewer samples than that, flush them out after a deadline - // anyways. + // If we have fewer samples than that, flush them out after a deadline anyways. var ( - max = s.qm.cfg.MaxSamplesPerSend - nPending = 0 - pendingSamples = allocateTimeSeries(max) + max = s.qm.cfg.MaxSamplesPerSend + // Rough estimate, 1% of active series will contain an exemplar on each scrape. + // TODO(cstyan): Casting this many times smells, also we could get index out of bounds issues here. 
+ maxExemplars = int(math.Max(1, float64(max/10))) + nPending, nPendingSamples, nPendingExemplars = 0, 0, 0 + sampleBuffer = allocateSampleBuffer(max) + buf []byte + pendingData []prompb.TimeSeries + exemplarBuffer [][]prompb.Exemplar ) + totalPending := max + if s.qm.sendExemplars { + exemplarBuffer = allocateExemplarBuffer(maxExemplars) + totalPending += maxExemplars + } + + pendingData = make([]prompb.TimeSeries, totalPending) timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline)) stop := func() { @@ -926,18 +1057,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { case <-ctx.Done(): // In this case we drop all samples in the buffer and the queue. // Remove them from pending and mark them as failed. - droppedSamples := nPending + len(queue) + droppedSamples := nPendingSamples + int(s.enqueuedSamples.Load()) + droppedExemplars := nPendingExemplars + int(s.enqueuedExemplars.Load()) s.qm.metrics.pendingSamples.Sub(float64(droppedSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars)) s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples)) - s.droppedOnHardShutdown.Add(uint32(droppedSamples)) + s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars)) + s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples)) + s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars)) return case sample, ok := <-queue: if !ok { - if nPending > 0 { - level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", nPending) - s.sendSamples(ctx, pendingSamples[:nPending], &buf) - s.qm.metrics.pendingSamples.Sub(float64(nPending)) + if nPendingSamples > 0 || nPendingExemplars > 0 { + level.Debug(s.qm.logger).Log("msg", "Flushing data to remote storage...", "samples", nPendingSamples, "exemplars", nPendingExemplars) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) level.Debug(s.qm.logger).Log("msg", "Done flushing.") } return @@ -946,25 +1082,44 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll // stop reading from the queue. This makes it safe to reference pendingSamples by index. 
- pendingSamples[nPending].Labels = labelsToLabelsProto(sample.labels, pendingSamples[nPending].Labels) - pendingSamples[nPending].Samples[0].Timestamp = sample.t - pendingSamples[nPending].Samples[0].Value = sample.v - nPending++ + switch d := sample.(type) { + case writeSample: + sampleBuffer[nPendingSamples][0] = d.sample + pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Samples = sampleBuffer[nPendingSamples] + pendingData[nPending].Exemplars = nil + nPendingSamples++ + nPending++ - if nPending >= max { - s.sendSamples(ctx, pendingSamples, &buf) + case writeExemplar: + exemplarBuffer[nPendingExemplars][0] = d.exemplar + pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Samples = nil + pendingData[nPending].Exemplars = exemplarBuffer[nPendingExemplars] + nPendingExemplars++ + nPending++ + } + + if nPendingSamples >= max || nPendingExemplars >= maxExemplars { + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) + nPendingSamples = 0 + nPendingExemplars = 0 nPending = 0 - s.qm.metrics.pendingSamples.Sub(float64(max)) stop() timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) } case <-timer.C: - if nPending > 0 { - level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", nPending, "shard", shardNum) - s.sendSamples(ctx, pendingSamples[:nPending], &buf) - s.qm.metrics.pendingSamples.Sub(float64(nPending)) + if nPendingSamples > 0 || nPendingExemplars > 0 { + level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf) + s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) + nPendingSamples = 0 + nPendingExemplars = 0 nPending = 0 } timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) @@ -972,23 +1127,24 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { } } -func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) { +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, buf) + err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, buf) if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", len(samples), "err", err) - s.qm.metrics.failedSamplesTotal.Add(float64(len(samples))) + level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) + s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) + s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) } // These counters are used to calculate the dynamic sharding, and as such // should be maintained irrespective of success or failure. 
- s.qm.samplesOut.incr(int64(len(samples))) - s.qm.samplesOutDuration.incr(int64(time.Since(begin))) + s.qm.dataOut.incr(int64(len(samples))) + s.qm.dataOutDuration.incr(int64(time.Since(begin))) s.qm.lastSendTimestamp.Store(time.Now().Unix()) } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) error { // Build the WriteRequest with no metadata. req, highest, err := buildWriteRequest(samples, nil, *buf) if err != nil { @@ -998,7 +1154,6 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti } reqSize := len(*buf) - sampleCount := len(samples) *buf = req // An anonymous function allows us to defer the completion of our per-try spans @@ -1009,6 +1164,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti defer span.Finish() span.SetTag("samples", sampleCount) + if exemplarCount > 0 { + span.SetTag("exemplars", exemplarCount) + } span.SetTag("request_size", reqSize) span.SetTag("try", try) span.SetTag("remote_name", s.qm.storeClient.Name()) @@ -1016,6 +1174,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti begin := time.Now() s.qm.metrics.samplesTotal.Add(float64(sampleCount)) + s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) err := s.qm.client().Store(ctx, *buf) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) @@ -1030,13 +1189,14 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti onRetry := func() { s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount)) + s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount)) } err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry) if err != nil { return err } - s.qm.metrics.samplesBytesTotal.Add(float64(reqSize)) + s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) return nil } @@ -1096,10 +1256,13 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, buf []byte) ([]byte, int64, error) { var highest int64 for _, ts := range samples { - // At the moment we only ever append a TimeSeries with a single sample in it. - if ts.Samples[0].Timestamp > highest { + // At the moment we only ever append a TimeSeries with a single sample or exemplar in it. + if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest { highest = ts.Samples[0].Timestamp } + if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest { + highest = ts.Exemplars[0].Timestamp + } } req := &prompb.WriteRequest{ @@ -1121,11 +1284,18 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta return compressed, highest, nil } -func allocateTimeSeries(capacity int) []prompb.TimeSeries { - timeseries := make([]prompb.TimeSeries, capacity) - // We only ever send one sample per timeseries, so preallocate with length one. 
- for i := range timeseries { - timeseries[i].Samples = []prompb.Sample{{}} +func allocateSampleBuffer(capacity int) [][]prompb.Sample { + buf := make([][]prompb.Sample, capacity) + for i := range buf { + buf[i] = []prompb.Sample{{}} } - return timeseries + return buf +} + +func allocateExemplarBuffer(capacity int) [][]prompb.Exemplar { + buf := make([][]prompb.Exemplar, capacity) + for i := range buf { + buf[i] = []prompb.Exemplar{{}} + } + return buf } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 0ac594ffa..4d8b40e98 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -60,21 +60,22 @@ func newHighestTimestampMetric() *maxTimestamp { } func TestSampleDelivery(t *testing.T) { + + testcases := []struct { + name string + samples bool + exemplars bool + }{ + {samples: true, exemplars: false, name: "samples only"}, + {samples: true, exemplars: true, name: "both samples and exemplars"}, + {samples: false, exemplars: true, name: "exemplars only"}, + } + // Let's create an even number of send batches so we don't run into the // batch timeout case. - n := config.DefaultQueueConfig.MaxSamplesPerSend * 2 - samples, series := createTimeseries(n, n) + n := 3 - c := NewTestWriteClient() - c.expectSamples(samples[:len(samples)/2], series) - - queueConfig := config.DefaultQueueConfig - queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) - queueConfig.MaxShards = 1 - queueConfig.Capacity = len(samples) - queueConfig.MaxSamplesPerSend = len(samples) / 2 - - dir, err := ioutil.TempDir("", "TestSampleDeliver") + dir, err := ioutil.TempDir("", "TestSampleDelivery") require.NoError(t, err) defer func() { require.NoError(t, os.RemoveAll(dir)) @@ -83,13 +84,11 @@ func TestSampleDelivery(t *testing.T) { s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) defer s.Close() + queueConfig := config.DefaultQueueConfig + queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond) + queueConfig.MaxShards = 1 + writeConfig := config.DefaultRemoteWriteConfig - conf := &config.Config{ - GlobalConfig: config.DefaultGlobalConfig, - RemoteWriteConfigs: []*config.RemoteWriteConfig{ - &writeConfig, - }, - } // We need to set URL's so that metric creation doesn't panic. writeConfig.URL = &common_config.URL{ URL: &url.URL{ @@ -97,19 +96,60 @@ func TestSampleDelivery(t *testing.T) { }, } writeConfig.QueueConfig = queueConfig - require.NoError(t, s.ApplyConfig(conf)) - hash, err := toHash(writeConfig) - require.NoError(t, err) - qm := s.rws.queues[hash] - qm.SetClient(c) + writeConfig.SendExemplars = true - qm.StoreSeries(series, 0) + conf := &config.Config{ + GlobalConfig: config.DefaultGlobalConfig, + RemoteWriteConfigs: []*config.RemoteWriteConfig{ + &writeConfig, + }, + } - qm.Append(samples[:len(samples)/2]) - c.waitForExpectedSamples(t) - c.expectSamples(samples[len(samples)/2:], series) - qm.Append(samples[len(samples)/2:]) - c.waitForExpectedSamples(t) + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + + var ( + series []record.RefSeries + samples []record.RefSample + exemplars []record.RefExemplar + ) + + // Generates same series in both cases. + if tc.samples { + samples, series = createTimeseries(n, n) + } + if tc.exemplars { + exemplars, series = createExemplars(n, n) + } + + // Apply new config. 
+ queueConfig.Capacity = len(samples) + queueConfig.MaxSamplesPerSend = len(samples) / 2 + require.NoError(t, s.ApplyConfig(conf)) + hash, err := toHash(writeConfig) + require.NoError(t, err) + qm := s.rws.queues[hash] + + c := NewTestWriteClient() + qm.SetClient(c) + + qm.StoreSeries(series, 0) + + // Send first half of data. + c.expectSamples(samples[:len(samples)/2], series) + c.expectExemplars(exemplars[:len(exemplars)/2], series) + qm.Append(samples[:len(samples)/2]) + qm.AppendExemplars(exemplars[:len(exemplars)/2]) + c.waitForExpectedData(t) + + // Send second half of data. + c.expectSamples(samples[len(samples)/2:], series) + c.expectExemplars(exemplars[len(exemplars)/2:], series) + qm.Append(samples[len(samples)/2:]) + qm.AppendExemplars(exemplars[len(exemplars)/2:]) + c.waitForExpectedData(t) + }) + } } func TestMetadataDelivery(t *testing.T) { @@ -123,7 +163,7 @@ func TestMetadataDelivery(t *testing.T) { mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() defer m.Stop() @@ -157,7 +197,7 @@ func TestSampleDeliveryTimeout(t *testing.T) { }() metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() defer m.Stop() @@ -165,11 +205,11 @@ func TestSampleDeliveryTimeout(t *testing.T) { // Send the samples twice, waiting for the samples in the meantime. c.expectSamples(samples, series) m.Append(samples) - c.waitForExpectedSamples(t) + c.waitForExpectedData(t) c.expectSamples(samples, series) m.Append(samples) - c.waitForExpectedSamples(t) + c.waitForExpectedData(t) } func TestSampleDeliveryOrder(t *testing.T) { @@ -203,14 +243,14 @@ func TestSampleDeliveryOrder(t *testing.T) { mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() defer m.Stop() // These should be received by the client. 
m.Append(samples) - c.waitForExpectedSamples(t) + c.waitForExpectedData(t) } func TestShutdown(t *testing.T) { @@ -227,7 +267,7 @@ func TestShutdown(t *testing.T) { mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false) n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend samples, series := createTimeseries(n, n) m.StoreSeries(series, 0) @@ -269,7 +309,7 @@ func TestSeriesReset(t *testing.T) { cfg := config.DefaultQueueConfig mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false) for i := 0; i < numSegments; i++ { series := []record.RefSeries{} for j := 0; j < numSeries; j++ { @@ -302,7 +342,7 @@ func TestReshard(t *testing.T) { }() metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) m.Start() @@ -322,7 +362,7 @@ func TestReshard(t *testing.T) { time.Sleep(100 * time.Millisecond) } - c.waitForExpectedSamples(t) + c.waitForExpectedData(t) } func TestReshardRaceWithStop(t *testing.T) { @@ -337,7 +377,7 @@ func TestReshardRaceWithStop(t *testing.T) { go func() { for { metrics := newQueueManagerMetrics(nil, "", "") - m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() h.Unlock() h.Lock() @@ -357,7 +397,7 @@ func TestReleaseNoninternedString(t *testing.T) { mcfg := config.DefaultMetadataConfig metrics := newQueueManagerMetrics(nil, "", "") c := NewTestWriteClient() - m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.Start() for i := 1; i < 1000; i++ { @@ -408,10 +448,10 @@ func TestShouldReshard(t *testing.T) { for _, c := range cases { metrics := newQueueManagerMetrics(nil, "", "") client := NewTestWriteClient() - m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, client, defaultFlushDeadline, newPool(), 
newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.numShards = c.startingShards - m.samplesIn.incr(c.samplesIn) - m.samplesOut.incr(c.samplesOut) + m.dataIn.incr(c.samplesIn) + m.dataOut.incr(c.samplesOut) m.lastSendTimestamp.Store(c.lastSendTimestamp) m.Start() @@ -436,7 +476,6 @@ func createTimeseries(numSamples, numSeries int) ([]record.RefSample, []record.R T: int64(j), V: float64(i), }) - } series = append(series, record.RefSeries{ Ref: uint64(i), @@ -446,6 +485,28 @@ func createTimeseries(numSamples, numSeries int) ([]record.RefSample, []record.R return samples, series } +func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) { + exemplars := make([]record.RefExemplar, 0, numExemplars) + series := make([]record.RefSeries, 0, numSeries) + for i := 0; i < numSeries; i++ { + name := fmt.Sprintf("test_metric_%d", i) + for j := 0; j < numExemplars; j++ { + e := record.RefExemplar{ + Ref: uint64(i), + T: int64(j), + V: float64(i), + Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)), + } + exemplars = append(exemplars, e) + } + series = append(series, record.RefSeries{ + Ref: uint64(i), + Labels: labels.Labels{{Name: "__name__", Value: name}}, + }) + } + return exemplars, series +} + func getSeriesNameFromRef(r record.RefSeries) string { for _, l := range r.Labels { if l.Name == "__name__" { @@ -456,13 +517,15 @@ func getSeriesNameFromRef(r record.RefSeries) string { } type TestWriteClient struct { - receivedSamples map[string][]prompb.Sample - expectedSamples map[string][]prompb.Sample - receivedMetadata map[string][]prompb.MetricMetadata - withWaitGroup bool - wg sync.WaitGroup - mtx sync.Mutex - buf []byte + receivedSamples map[string][]prompb.Sample + expectedSamples map[string][]prompb.Sample + receivedExemplars map[string][]prompb.Exemplar + expectedExemplars map[string][]prompb.Exemplar + receivedMetadata map[string][]prompb.MetricMetadata + withWaitGroup bool + wg sync.WaitGroup + mtx sync.Mutex + buf []byte } func NewTestWriteClient() *TestWriteClient { @@ -494,7 +557,29 @@ func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.R c.wg.Add(len(ss)) } -func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) { +func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []record.RefSeries) { + if !c.withWaitGroup { + return + } + c.mtx.Lock() + defer c.mtx.Unlock() + + c.expectedExemplars = map[string][]prompb.Exemplar{} + c.receivedExemplars = map[string][]prompb.Exemplar{} + + for _, s := range ss { + seriesName := getSeriesNameFromRef(series[s.Ref]) + e := prompb.Exemplar{ + Labels: labelsToLabelsProto(s.Labels, nil), + Timestamp: s.T, + Value: s.V, + } + c.expectedExemplars[seriesName] = append(c.expectedExemplars[seriesName], e) + } + c.wg.Add(len(ss)) +} + +func (c *TestWriteClient) waitForExpectedData(tb testing.TB) { if !c.withWaitGroup { return } @@ -504,9 +589,12 @@ func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) { for ts, expectedSamples := range c.expectedSamples { require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts) } + for ts, expectedExemplar := range c.expectedExemplars { + require.Equal(tb, expectedExemplar, c.receivedExemplars[ts], ts) + } } -func (c *TestWriteClient) expectSampleCount(numSamples int) { +func (c *TestWriteClient) expectDataCount(numSamples int) { 
if !c.withWaitGroup { return } @@ -515,7 +603,7 @@ func (c *TestWriteClient) expectSampleCount(numSamples int) { c.wg.Add(numSamples) } -func (c *TestWriteClient) waitForExpectedSampleCount() { +func (c *TestWriteClient) waitForExpectedDataCount() { if !c.withWaitGroup { return } @@ -553,6 +641,11 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error { count++ c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample) } + + for _, ex := range ts.Exemplars { + count++ + c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex) + } } if c.withWaitGroup { c.wg.Add(-count) @@ -621,7 +714,7 @@ func BenchmarkSampleDelivery(b *testing.B) { defer os.RemoveAll(dir) metrics := newQueueManagerMetrics(nil, "", "") - m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) m.StoreSeries(series, 0) // These should be received by the client. @@ -630,9 +723,9 @@ func BenchmarkSampleDelivery(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - c.expectSampleCount(len(samples)) + c.expectDataCount(len(samples)) m.Append(samples) - c.waitForExpectedSampleCount() + c.waitForExpectedDataCount() } // Do not include shutdown b.StopTimer() @@ -667,7 +760,7 @@ func BenchmarkStartup(b *testing.B) { c := NewTestBlockedWriteClient() m := NewQueueManager(metrics, nil, nil, logger, dir, newEWMARate(ewmaWeight, shardUpdateDuration), - cfg, mcfg, nil, nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil) + cfg, mcfg, nil, nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false) m.watcher.SetStartTime(timestamp.Time(math.MaxInt64)) m.watcher.MaxSegment = segments[len(segments)-2] err := m.watcher.Run() @@ -719,7 +812,7 @@ func TestCalculateDesiredShards(t *testing.T) { metrics := newQueueManagerMetrics(nil, "", "") samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) - m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) + m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false) // Need to start the queue manager so the proper metrics are initialized. // However we can stop it right away since we don't need to do any actual @@ -745,8 +838,8 @@ func TestCalculateDesiredShards(t *testing.T) { // helper function for sending samples. sendSamples := func(s int64, ts time.Duration) { pendingSamples -= s - m.samplesOut.incr(s) - m.samplesOutDuration.incr(int64(m.numShards) * int64(shardUpdateDuration)) + m.dataOut.incr(s) + m.dataOutDuration.incr(int64(m.numShards) * int64(shardUpdateDuration)) // highest sent is how far back pending samples would be at our input rate. 
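For context on the `dataOut`/`dataOutDuration` counters above: the resharding logic sizes the shard pool from how long one shard spends sending a single datum versus the observed inbound rate. A deliberately simplified sketch of that heuristic; the production calculation also folds in backlog, delay, and min/max clamping, so treat this as an approximation rather than the actual formula:

```go
package main

import "fmt"

// desiredShards estimates how many shards are needed to keep up: first work
// out how much shard time one datum costs, then multiply by the inbound rate.
func desiredShards(dataInRate, dataOutRate, dataOutDuration float64) float64 {
	if dataOutRate <= 0 {
		return 1
	}
	timePerDatum := dataOutDuration / dataOutRate // seconds of shard time per datum sent
	return dataInRate * timePerDatum              // shards needed to match input
}

func main() {
	// 10k samples/s arriving, and we currently spend 0.2ms of shard time per sample.
	fmt.Printf("%.1f shards\n", desiredShards(10000, 5000, 1.0)) // 2.0 shards
}
```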
highestSent := startedAt.Add(ts - time.Duration(pendingSamples/inputRate)*time.Second) diff --git a/storage/remote/write.go b/storage/remote/write.go index a9270630f..94d96295e 100644 --- a/storage/remote/write.go +++ b/storage/remote/write.go @@ -37,6 +37,12 @@ var ( Name: "samples_in_total", Help: "Samples in to remote storage, compare to samples out for queue managers.", }) + exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_in_total", + Help: "Exemplars in to remote storage, compare to exemplars out for queue managers.", + }) ) // WriteStorage represents all the remote write storage. @@ -169,6 +175,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { rws.interner, rws.highestTimestamp, rws.scraper, + rwConf.SendExemplars, ) // Keep track of which queues are new so we know which to start. newHashes = append(newHashes, hash) @@ -210,6 +217,7 @@ func (rws *WriteStorage) Close() error { type timestampTracker struct { writeStorage *WriteStorage samples int64 + exemplars int64 highestTimestamp int64 highestRecvTimestamp *maxTimestamp } @@ -224,14 +232,16 @@ func (t *timestampTracker) Append(_ uint64, _ labels.Labels, ts int64, _ float64 } func (t *timestampTracker) AppendExemplar(_ uint64, _ labels.Labels, _ exemplar.Exemplar) (uint64, error) { + t.exemplars++ return 0, nil } // Commit implements storage.Appender. func (t *timestampTracker) Commit() error { - t.writeStorage.samplesIn.incr(t.samples) + t.writeStorage.samplesIn.incr(t.samples + t.exemplars) samplesIn.Add(float64(t.samples)) + exemplarsIn.Add(float64(t.exemplars)) t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000)) return nil } diff --git a/tsdb/docs/format/wal.md b/tsdb/docs/format/wal.md index af2ec6142..2d55c441b 100644 --- a/tsdb/docs/format/wal.md +++ b/tsdb/docs/format/wal.md @@ -86,3 +86,35 @@ and specify an interval for which samples of a series got deleted. │ . . . │ └─────────────────────────────────────────────────────┘ ``` + +### Exemplar records + +Exemplar records encode exemplars as a list of triples `(series_id, timestamp, value)` +plus the length of the labels list, and all the labels. +The first row stores the starting id and the starting timestamp. +Series reference and timestamp are encoded as deltas w.r.t the first exemplar. +The first exemplar record begins at the second row. + +See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars + +``` +┌──────────────────────────────────────────────────────────────────┐ +│ type = 5 <1b> │ +├──────────────────────────────────────────────────────────────────┤ +│ ┌────────────────────┬───────────────────────────┐ │ +│ │ id <8b> │ timestamp <8b> │ │ +│ └────────────────────┴───────────────────────────┘ │ +│ ┌────────────────────┬───────────────────────────┬─────────────┐ │ +│ │ id_delta │ timestamp_delta │ value <8b> │ │ +│ ├────────────────────┴───────────────────────────┴─────────────┤ │ +│ │ n = len(labels) │ │ +│ ├──────────────────────┬───────────────────────────────────────┤ │ +│ │ len(str_1) │ str_1 │ │ +│ ├──────────────────────┴───────────────────────────────────────┤ │ +│ │ ... │ │ +│ ├───────────────────────┬──────────────────────────────────────┤ │ +│ │ len(str_2n) │ str_2n │ │ │ +│ └───────────────────────┴────────────────┴─────────────────────┘ │ +│ . . . 
│ +└──────────────────────────────────────────────────────────────────┘ +``` diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index a7d3182ac..6332c1fe3 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -17,6 +17,7 @@ import ( "context" "sort" "sync" + "unicode/utf8" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/exemplar" @@ -83,7 +84,7 @@ func NewCircularExemplarStorage(len int, reg prometheus.Registerer) (ExemplarSto }), outOfOrderExemplars: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_exemplar_out_of_order_exemplars_total", - Help: "Total number of out of order exemplar ingestion failed attempts", + Help: "Total number of out of order exemplar ingestion failed attempts.", }), } if reg != nil { @@ -165,6 +166,51 @@ Outer: return false } +func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error { + seriesLabels := l.String() + + // TODO(bwplotka): This lock can lock all scrapers, there might high contention on this on scale. + // Optimize by moving the lock to be per series (& benchmark it). + ce.lock.RLock() + defer ce.lock.RUnlock() + return ce.validateExemplar(seriesLabels, e, false) +} + +// Not thread safe. The append parameters tells us whether this is an external validation, or interal +// as a reuslt of an AddExemplar call, in which case we should update any relevant metrics. +func (ce *CircularExemplarStorage) validateExemplar(l string, e exemplar.Exemplar, append bool) error { + idx, ok := ce.index[l] + if !ok { + return nil + } + + // Exemplar label length does not include chars involved in text rendering such as quotes + // equals sign, or commas. See definiton of const ExemplarMaxLabelLength. + labelSetLen := 0 + for _, l := range e.Labels { + labelSetLen += utf8.RuneCountInString(l.Name) + labelSetLen += utf8.RuneCountInString(l.Value) + + if labelSetLen > exemplar.ExemplarMaxLabelSetLength { + return storage.ErrExemplarLabelLength + } + } + + // Check for duplicate vs last stored exemplar for this series. + // NB these are expected, and appending them is a no-op. + if ce.exemplars[idx.newest].exemplar.Equals(e) { + return storage.ErrDuplicateExemplar + } + + if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts { + if append { + ce.outOfOrderExemplars.Inc() + } + return storage.ErrOutOfOrderExemplar + } + return nil +} + func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { seriesLabels := l.String() @@ -173,21 +219,19 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp ce.lock.Lock() defer ce.lock.Unlock() - idx, ok := ce.index[seriesLabels] + err := ce.validateExemplar(seriesLabels, e, true) + if err != nil { + if err == storage.ErrDuplicateExemplar { + // Duplicate exemplar, noop. + return nil + } + return err + } + + _, ok := ce.index[seriesLabels] if !ok { ce.index[seriesLabels] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l} } else { - // Check for duplicate vs last stored exemplar for this series. - // NB these are expected, add appending them is a no-op. 
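The `validateExemplar` checks added above reduce to three rules: reject label sets whose total rune length exceeds the limit, treat a repeat of the newest stored exemplar as a duplicate, and reject timestamps at or before the newest one. A self-contained sketch with simplified local types; note the duplicate test here compares only timestamp and value for brevity (the real code compares full labels via `Equals`), and a later patch in this series moves the length check to run first:

```go
package main

import (
	"errors"
	"fmt"
	"unicode/utf8"
)

const maxLabelSetLen = 128 // stand-in for exemplar.ExemplarMaxLabelSetLength

var (
	errDuplicate  = errors.New("duplicate exemplar")
	errOutOfOrder = errors.New("out of order exemplar")
	errTooLong    = errors.New("label length exceeds maximum")
)

type ex struct {
	Labels map[string]string
	Ts     int64
	V      float64
}

// validate applies the three rules in order: label-set length (counted in
// runes over names and values, excluding rendering characters like quotes,
// equals signs and commas), duplicate-of-newest, and out-of-order timestamp.
func validate(newest *ex, e ex) error {
	n := 0
	for name, value := range e.Labels {
		n += utf8.RuneCountInString(name) + utf8.RuneCountInString(value)
		if n > maxLabelSetLen {
			return errTooLong
		}
	}
	if newest == nil {
		return nil // first exemplar for the series is always accepted
	}
	if e.Ts == newest.Ts && e.V == newest.V {
		return errDuplicate
	}
	if e.Ts <= newest.Ts {
		return errOutOfOrder
	}
	return nil
}

func main() {
	newest := &ex{Labels: map[string]string{"traceID": "qwerty"}, Ts: 2, V: 0.1}
	fmt.Println(validate(newest, ex{Ts: 1, V: 0.3})) // out of order exemplar
	fmt.Println(validate(newest, ex{Ts: 2, V: 0.1})) // duplicate exemplar
}
```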
- if ce.exemplars[idx.newest].exemplar.Equals(e) {
- return nil
- }
-
- if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts {
- ce.outOfOrderExemplars.Inc()
- return storage.ErrOutOfOrderExemplar
- }
-
- ce.exemplars[ce.index[seriesLabels].newest].next = ce.nextIndex
 }

@@ -218,13 +262,13 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
 ce.seriesWithExemplarsInStorage.Set(float64(len(ce.index)))
 if next := ce.exemplars[ce.nextIndex]; next != nil {
 ce.exemplarsInStorage.Set(float64(len(ce.exemplars)))
- ce.lastExemplarsTs.Set(float64(next.exemplar.Ts))
+ ce.lastExemplarsTs.Set(float64(next.exemplar.Ts) / 1000)
 return nil
 }

 // We did not yet fill the buffer.
 ce.exemplarsInStorage.Set(float64(ce.nextIndex))
- ce.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts))
+ ce.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts) / 1000)
 return nil
 }

@@ -234,6 +278,10 @@ func (noopExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) err
 return nil
 }

+func (noopExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error {
+ return nil
+}
+
 func (noopExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
 return &noopExemplarQuerier{}, nil
 }
diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go
index 1c1780d22..cd6983ffb 100644
--- a/tsdb/exemplar_test.go
+++ b/tsdb/exemplar_test.go
@@ -16,6 +16,7 @@ package tsdb
 import (
 "reflect"
 "strconv"
+ "strings"
 "testing"

 "github.com/stretchr/testify/require"
@@ -25,6 +26,66 @@ import (
 "github.com/prometheus/prometheus/storage"
 )

+// Tests the same exemplar cases as AddExemplar, but specifically the ValidateExemplar function so it can be relied on externally.
+func TestValidateExemplar(t *testing.T) {
+ exs, err := NewCircularExemplarStorage(2, nil)
+ require.NoError(t, err)
+ es := exs.(*CircularExemplarStorage)
+
+ l := labels.Labels{
+ {Name: "service", Value: "asdf"},
+ }
+ e := exemplar.Exemplar{
+ Labels: labels.Labels{
+ labels.Label{
+ Name: "traceID",
+ Value: "qwerty",
+ },
+ },
+ Value: 0.1,
+ Ts: 1,
+ }
+
+ require.NoError(t, es.ValidateExemplar(l, e))
+ require.NoError(t, es.AddExemplar(l, e))
+
+ e2 := exemplar.Exemplar{
+ Labels: labels.Labels{
+ labels.Label{
+ Name: "traceID",
+ Value: "zxcvb",
+ },
+ },
+ Value: 0.1,
+ Ts: 2,
+ }
+
+ require.NoError(t, es.ValidateExemplar(l, e2))
+ require.NoError(t, es.AddExemplar(l, e2))
+
+ require.Equal(t, es.ValidateExemplar(l, e2), storage.ErrDuplicateExemplar, "error is expected attempting to validate duplicate exemplar")
+
+ e3 := e2
+ e3.Ts = 3
+ require.Equal(t, es.ValidateExemplar(l, e3), storage.ErrDuplicateExemplar, "error is expected when attempting to add duplicate exemplar, even with different timestamp")
+
+ e3.Ts = 1
+ e3.Value = 0.3
+ require.Equal(t, es.ValidateExemplar(l, e3), storage.ErrOutOfOrderExemplar)
+
+ e4 := exemplar.Exemplar{
+ Labels: labels.Labels{
+ labels.Label{
+ Name: "a",
+ Value: strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength),
+ },
+ },
+ Value: 0.1,
+ Ts: 2,
+ }
+ require.Equal(t, storage.ErrExemplarLabelLength, es.ValidateExemplar(l, e4))
+}
+
 func TestAddExemplar(t *testing.T) {
 exs, err := NewCircularExemplarStorage(2, nil)
 require.NoError(t, err)
@@ -44,8 +105,7 @@ func TestAddExemplar(t *testing.T) {
 Ts: 1,
 }

- err = es.AddExemplar(l, e)
- require.NoError(t, err)
+ require.NoError(t, es.AddExemplar(l, e))
 require.Equal(t, es.index[l.String()].newest, 0, "exemplar was not stored correctly")

 e2 := exemplar.Exemplar{
@@ -59,23 +119,31 @@ func TestAddExemplar(t 
*testing.T) { Ts: 2, } - err = es.AddExemplar(l, e2) - require.NoError(t, err) + require.NoError(t, es.AddExemplar(l, e2)) require.Equal(t, es.index[l.String()].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update") require.True(t, es.exemplars[es.index[l.String()].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[l.String()].newest].exemplar) - err = es.AddExemplar(l, e2) - require.NoError(t, err, "no error is expected attempting to add duplicate exemplar") + require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar") e3 := e2 e3.Ts = 3 - err = es.AddExemplar(l, e3) - require.NoError(t, err, "no error is expected when attempting to add duplicate exemplar, even with different timestamp") + require.NoError(t, es.AddExemplar(l, e3), "no error is expected when attempting to add duplicate exemplar, even with different timestamp") e3.Ts = 1 e3.Value = 0.3 - err = es.AddExemplar(l, e3) - require.Equal(t, err, storage.ErrOutOfOrderExemplar) + require.Equal(t, storage.ErrOutOfOrderExemplar, es.AddExemplar(l, e3)) + + e4 := exemplar.Exemplar{ + Labels: labels.Labels{ + labels.Label{ + Name: "a", + Value: strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength), + }, + }, + Value: 0.1, + Ts: 2, + } + require.Equal(t, storage.ErrExemplarLabelLength, es.AddExemplar(l, e4)) } func TestStorageOverflow(t *testing.T) { diff --git a/tsdb/head.go b/tsdb/head.go index 595cd48d7..f98e21ebe 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -58,6 +58,7 @@ var ( type ExemplarStorage interface { storage.ExemplarQueryable AddExemplar(labels.Labels, exemplar.Exemplar) error + ValidateExemplar(labels.Labels, exemplar.Exemplar) error } // Head handles reads and writes of time series data within a time window. @@ -459,15 +460,17 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks // Track number of samples that referenced a series we don't know about // for error reporting. var unknownRefs atomic.Uint64 + var unknownExemplarRefs atomic.Uint64 // Start workers that each process samples for a partition of the series ID space. // They are connected through a ring of channels which ensures that all sample batches // read from the WAL are processed in order. 
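The worker setup that the comment above describes can be reduced to its partitioning half: one goroutine per slice of the series ID space, each fed through its own channel, so records for a given series are always applied in order by a single worker. A runnable sketch of just that part; the real loader additionally chains an output ring of channels to preserve WAL batch order, which is omitted here:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

type refSample struct {
	Ref uint64
	T   int64
	V   float64
}

func main() {
	var (
		wg        sync.WaitGroup
		n         = runtime.GOMAXPROCS(0)
		inputs    = make([]chan []refSample, n)
		processed uint64
	)

	// One worker per partition of the series ID space; samples for a given
	// series always land on the same worker, so per-series order is kept.
	for i := 0; i < n; i++ {
		inputs[i] = make(chan []refSample, 300)
		wg.Add(1)
		go func(in <-chan []refSample) {
			defer wg.Done()
			for batch := range in {
				atomic.AddUint64(&processed, uint64(len(batch))) // stand-in for appending to the head
			}
		}(inputs[i])
	}

	// Dispatch: shard each record onto its worker by series reference.
	for ref := uint64(0); ref < 1000; ref++ {
		inputs[ref%uint64(n)] <- []refSample{{Ref: ref, T: 1, V: 1}}
	}
	for i := 0; i < n; i++ {
		close(inputs[i])
	}
	wg.Wait()
	fmt.Println("processed:", atomic.LoadUint64(&processed)) // processed: 1000
}
```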
var ( - wg sync.WaitGroup - n = runtime.GOMAXPROCS(0) - inputs = make([]chan []record.RefSample, n) - outputs = make([]chan []record.RefSample, n) + wg sync.WaitGroup + n = runtime.GOMAXPROCS(0) + inputs = make([]chan []record.RefSample, n) + outputs = make([]chan []record.RefSample, n) + exemplarsInput chan record.RefExemplar dec record.Decoder shards = make([][]record.RefSample, n) @@ -489,6 +492,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks return []tombstones.Stone{} }, } + exemplarsPool = sync.Pool{ + New: func() interface{} { + return []record.RefExemplar{} + }, + } ) defer func() { @@ -500,6 +508,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks for range outputs[i] { } } + close(exemplarsInput) wg.Wait() } }() @@ -516,6 +525,29 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks }(inputs[i], outputs[i]) } + wg.Add(1) + exemplarsInput = make(chan record.RefExemplar, 300) + go func(input <-chan record.RefExemplar) { + defer wg.Done() + for e := range input { + ms := h.series.getByID(e.Ref) + if ms == nil { + unknownExemplarRefs.Inc() + continue + } + + if e.T < h.minValidTime.Load() { + continue + } + // At the moment the only possible error here is out of order exemplars, which we shouldn't see when + // replaying the WAL, so lets just log the error if it's not that type. + err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) + if err != nil && err == storage.ErrOutOfOrderExemplar { + level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) + } + } + }(exemplarsInput) + go func() { defer close(decoded) for r.Next() { @@ -557,6 +589,18 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks return } decoded <- tstones + case record.Exemplars: + exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0] + exemplars, err = dec.Exemplars(rec, exemplars) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode exemplars"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- exemplars default: // Noop. } @@ -646,6 +690,12 @@ Outer: } //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. tstonesPool.Put(v) + case []record.RefExemplar: + for _, e := range v { + exemplarsInput <- e + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + exemplarsPool.Put(v) default: panic(fmt.Errorf("unexpected decoded type: %T", d)) } @@ -667,14 +717,15 @@ Outer: for range outputs[i] { } } + close(exemplarsInput) wg.Wait() if r.Err() != nil { return errors.Wrap(r.Err(), "read records") } - if unknownRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references", "count", unknownRefs.Load()) + if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 { + level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load()) } return nil } @@ -1339,6 +1390,15 @@ func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Ex // Ensure no empty labels have gotten through. e.Labels = e.Labels.WithoutEmpty() + err := a.exemplarAppender.ValidateExemplar(s.lset, e) + if err != nil { + if err == storage.ErrDuplicateExemplar { + // Duplicate, don't return an error but don't accept the exemplar. 
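The `exemplarsPool` above is a standard `sync.Pool` slice-recycling pattern. A minimal sketch, including the `staticcheck` SA6002 caveat that the real code suppresses:

```go
package main

import (
	"fmt"
	"sync"
)

type refExemplar struct {
	Ref uint64
	T   int64
	V   float64
}

// A pool of decode buffers, mirroring exemplarsPool above. Get returns a
// previously used slice; truncating it to [:0] keeps the backing array so
// repeated WAL replays stop allocating once the pool is warm.
var exemplarsPool = sync.Pool{
	New: func() interface{} {
		return []refExemplar{}
	},
}

func main() {
	buf := exemplarsPool.Get().([]refExemplar)[:0]
	buf = append(buf, refExemplar{Ref: 1, T: 100, V: 2.5})
	fmt.Println("decoded records:", len(buf))

	// Putting a slice (not a pointer) copies the slice header, which is what
	// staticcheck's SA6002 warns about; the WAL code suppresses the warning
	// because recycling the backing array is the whole point.
	exemplarsPool.Put(buf) //nolint:staticcheck
}
```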
+ return 0, nil + } + return 0, err + } + a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e}) return s.ref, nil @@ -1382,14 +1442,36 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log samples") } } + if len(a.exemplars) > 0 { + rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log exemplars") + } + } return nil } +func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { + ret := make([]record.RefExemplar, 0, len(es)) + for _, e := range es { + ret = append(ret, record.RefExemplar{ + Ref: e.ref, + T: e.exemplar.Ts, + V: e.exemplar.Value, + Labels: e.exemplar.Labels, + }) + } + return ret +} + func (a *headAppender) Commit() (err error) { if a.closed { return ErrAppenderClosed } defer func() { a.closed = true }() + if err := a.log(); err != nil { _ = a.Rollback() // Most likely the same error will happen again. return errors.Wrap(err, "write to WAL") @@ -1404,7 +1486,6 @@ func (a *headAppender) Commit() (err error) { continue } level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) - continue } } @@ -1458,7 +1539,9 @@ func (a *headAppender) Rollback() (err error) { series.Unlock() } a.head.putAppendBuffer(a.samples) + a.head.putExemplarBuffer(a.exemplars) a.samples = nil + a.exemplars = nil // Series are created in the head memory regardless of rollback. Thus we have // to log them to the WAL in any case. diff --git a/tsdb/head_test.go b/tsdb/head_test.go index dfd916b76..2afbc2081 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -51,6 +51,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal. opts := DefaultHeadOptions() opts.ChunkRange = chunkRange opts.ChunkDirRoot = dir + opts.NumExemplars = 10 h, err := NewHead(nil, nil, wlog, opts) require.NoError(t, err) @@ -87,6 +88,8 @@ func populateTestWAL(t testing.TB, w *wal.WAL, recs []interface{}) { require.NoError(t, w.Log(enc.Samples(v, nil))) case []tombstones.Stone: require.NoError(t, w.Log(enc.Tombstones(v, nil))) + case []record.RefExemplar: + require.NoError(t, w.Log(enc.Exemplars(v, nil))) } } } @@ -148,61 +151,91 @@ func BenchmarkLoadWAL(b *testing.B) { } labelsPerSeries := 5 + // Rough estimates of most common % of samples that have an exemplar for each scrape. + exemplarsPercentages := []float64{0, 0.5, 1, 5} + lastExemplarsPerSeries := -1 for _, c := range cases { - b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries), - func(b *testing.B) { - dir, err := ioutil.TempDir("", "test_load_wal") - require.NoError(b, err) - defer func() { - require.NoError(b, os.RemoveAll(dir)) - }() - - w, err := wal.New(nil, nil, dir, false) - require.NoError(b, err) - - // Write series. - refSeries := make([]record.RefSeries, 0, c.seriesPerBatch) - for k := 0; k < c.batches; k++ { - refSeries = refSeries[:0] - for i := k * c.seriesPerBatch; i < (k+1)*c.seriesPerBatch; i++ { - lbls := make(map[string]string, labelsPerSeries) - lbls[defaultLabelName] = strconv.Itoa(i) - for j := 1; len(lbls) < labelsPerSeries; j++ { - lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j) - } - refSeries = append(refSeries, record.RefSeries{Ref: uint64(i) * 100, Labels: labels.FromMap(lbls)}) - } - populateTestWAL(b, w, []interface{}{refSeries}) - } - - // Write samples. 
- refSamples := make([]record.RefSample, 0, c.seriesPerBatch) - for i := 0; i < c.samplesPerSeries; i++ { - for j := 0; j < c.batches; j++ { - refSamples = refSamples[:0] - for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ { - refSamples = append(refSamples, record.RefSample{ - Ref: uint64(k) * 100, - T: int64(i) * 10, - V: float64(i) * 100, - }) - } - populateTestWAL(b, w, []interface{}{refSamples}) - } - } - - b.ResetTimer() - - // Load the WAL. - for i := 0; i < b.N; i++ { - opts := DefaultHeadOptions() - opts.ChunkRange = 1000 - opts.ChunkDirRoot = w.Dir() - h, err := NewHead(nil, nil, w, opts) + for _, p := range exemplarsPercentages { + exemplarsPerSeries := int(math.RoundToEven(float64(c.samplesPerSeries) * p / 100)) + // For tests with low samplesPerSeries we could end up testing with 0 exemplarsPerSeries + // multiple times without this check. + if exemplarsPerSeries == lastExemplarsPerSeries { + continue + } + lastExemplarsPerSeries = exemplarsPerSeries + // fmt.Println("exemplars per series: ", exemplarsPerSeries) + b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries), + func(b *testing.B) { + dir, err := ioutil.TempDir("", "test_load_wal") require.NoError(b, err) - h.Init(0) - } - }) + defer func() { + require.NoError(b, os.RemoveAll(dir)) + }() + + w, err := wal.New(nil, nil, dir, false) + require.NoError(b, err) + + // Write series. + refSeries := make([]record.RefSeries, 0, c.seriesPerBatch) + for k := 0; k < c.batches; k++ { + refSeries = refSeries[:0] + for i := k * c.seriesPerBatch; i < (k+1)*c.seriesPerBatch; i++ { + lbls := make(map[string]string, labelsPerSeries) + lbls[defaultLabelName] = strconv.Itoa(i) + for j := 1; len(lbls) < labelsPerSeries; j++ { + lbls[defaultLabelName+strconv.Itoa(j)] = defaultLabelValue + strconv.Itoa(j) + } + refSeries = append(refSeries, record.RefSeries{Ref: uint64(i) * 100, Labels: labels.FromMap(lbls)}) + } + populateTestWAL(b, w, []interface{}{refSeries}) + } + + // Write samples. + refSamples := make([]record.RefSample, 0, c.seriesPerBatch) + for i := 0; i < c.samplesPerSeries; i++ { + for j := 0; j < c.batches; j++ { + refSamples = refSamples[:0] + for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ { + refSamples = append(refSamples, record.RefSample{ + Ref: uint64(k) * 100, + T: int64(i) * 10, + V: float64(i) * 100, + }) + } + populateTestWAL(b, w, []interface{}{refSamples}) + } + } + + // Write samples. + refExemplars := make([]record.RefExemplar, 0, c.seriesPerBatch) + for i := 0; i < exemplarsPerSeries; i++ { + for j := 0; j < c.batches; j++ { + refExemplars = refExemplars[:0] + for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ { + refExemplars = append(refExemplars, record.RefExemplar{ + Ref: uint64(k) * 100, + T: int64(i) * 10, + V: float64(i) * 100, + Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)), + }) + } + populateTestWAL(b, w, []interface{}{refExemplars}) + } + } + + b.ResetTimer() + + // Load the WAL. 
+ for i := 0; i < b.N; i++ { + opts := DefaultHeadOptions() + opts.ChunkRange = 1000 + opts.ChunkDirRoot = w.Dir() + h, err := NewHead(nil, nil, w, opts) + require.NoError(b, err) + h.Init(0) + } + }) + } } } @@ -233,6 +266,9 @@ func TestHead_ReadWAL(t *testing.T) { []tombstones.Stone{ {Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}}, }, + []record.RefExemplar{ + {Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("traceID", "asdf")}, + }, } head, w := newTestHead(t, 1000, compress) @@ -266,6 +302,12 @@ func TestHead_ReadWAL(t *testing.T) { require.Equal(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil))) require.Equal(t, []sample{{101, 6}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil))) require.Equal(t, []sample{{100, 3}, {101, 7}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil))) + + q, err := head.ExemplarQuerier(context.Background()) + require.NoError(t, err) + e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")}) + require.NoError(t, err) + require.Equal(t, e[0].Exemplars[0], exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")}) }) } } diff --git a/tsdb/record/record.go b/tsdb/record/record.go index 408882e83..b4ee77f0f 100644 --- a/tsdb/record/record.go +++ b/tsdb/record/record.go @@ -37,6 +37,8 @@ const ( Samples Type = 2 // Tombstones is used to match WAL records of type Tombstones. Tombstones Type = 3 + // Exemplars is used to match WAL records of type Exemplars. + Exemplars Type = 4 ) var ( @@ -57,6 +59,14 @@ type RefSample struct { V float64 } +// RefExemplar is an exemplar with it's labels, timestamp, value the exemplar was collected/observed with, and a reference to a series. +type RefExemplar struct { + Ref uint64 + T int64 + V float64 + Labels labels.Labels +} + // Decoder decodes series, sample, and tombstone records. // The zero value is ready to use. type Decoder struct { @@ -69,7 +79,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones: + case Series, Samples, Tombstones, Exemplars: return t } return Unknown @@ -166,6 +176,48 @@ func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombston return tstones, nil } +func (d *Decoder) Exemplars(rec []byte, exemplars []RefExemplar) ([]RefExemplar, error) { + dec := encoding.Decbuf{B: rec} + t := Type(dec.Byte()) + if t != Exemplars { + return nil, errors.New("invalid record type") + } + if dec.Len() == 0 { + return exemplars, nil + } + var ( + baseRef = dec.Be64() + baseTime = dec.Be64int64() + ) + for len(dec.B) > 0 && dec.Err() == nil { + dref := dec.Varint64() + dtime := dec.Varint64() + val := dec.Be64() + + lset := make(labels.Labels, dec.Uvarint()) + for i := range lset { + lset[i].Name = dec.UvarintStr() + lset[i].Value = dec.UvarintStr() + } + sort.Sort(lset) + + exemplars = append(exemplars, RefExemplar{ + Ref: baseRef + uint64(dref), + T: baseTime + dtime, + V: math.Float64frombits(val), + Labels: lset, + }) + } + + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d exemplars", len(exemplars)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return exemplars, nil +} + // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. 
type Encoder struct { @@ -226,3 +278,33 @@ func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte { } return buf.Get() } + +func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(Exemplars)) + + if len(exemplars) == 0 { + return buf.Get() + } + + // Store base timestamp and base reference number of first sample. + // All samples encode their timestamp and ref as delta to those. + first := exemplars[0] + + buf.PutBE64(first.Ref) + buf.PutBE64int64(first.T) + + for _, ex := range exemplars { + buf.PutVarint64(int64(ex.Ref) - int64(first.Ref)) + buf.PutVarint64(ex.T - first.T) + buf.PutBE64(math.Float64bits(ex.V)) + + buf.PutUvarint(len(ex.Labels)) + for _, l := range ex.Labels { + buf.PutUvarintStr(l.Name) + buf.PutUvarintStr(l.Value) + } + } + + return buf.Get() +} diff --git a/tsdb/record/record_test.go b/tsdb/record/record_test.go index 73f35478e..f69989d50 100644 --- a/tsdb/record/record_test.go +++ b/tsdb/record/record_test.go @@ -74,6 +74,15 @@ func TestRecord_EncodeDecode(t *testing.T) { {Ref: 13, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: -11}}}, {Ref: 13, Intervals: tombstones.Intervals{{Mint: 5000, Maxt: 1000}}}, }, decTstones) + + exemplars := []RefExemplar{ + {Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("traceID", "qwerty")}, + {Ref: 123, T: -1231, V: -123, Labels: labels.FromStrings("traceID", "asdf")}, + {Ref: 2, T: 0, V: 99999, Labels: labels.FromStrings("traceID", "zxcv")}, + } + decExemplars, err := dec.Exemplars(enc.Exemplars(exemplars, nil), nil) + require.NoError(t, err) + require.Equal(t, exemplars, decExemplars) } // TestRecord_Corrupted ensures that corrupted records return the correct error. @@ -117,6 +126,16 @@ func TestRecord_Corrupted(t *testing.T) { _, err := dec.Tombstones(corrupted, nil) require.Equal(t, err, encoding.ErrInvalidSize) }) + + t.Run("Test corrupted exemplar record", func(t *testing.T) { + exemplars := []RefExemplar{ + {Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("traceID", "asdf")}, + } + + corrupted := enc.Exemplars(exemplars, nil)[:8] + _, err := dec.Exemplars(corrupted, nil) + require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize) + }) } func TestRecord_Type(t *testing.T) { diff --git a/tsdb/wal/checkpoint.go b/tsdb/wal/checkpoint.go index a264e1e95..1f7681670 100644 --- a/tsdb/wal/checkpoint.go +++ b/tsdb/wal/checkpoint.go @@ -40,9 +40,11 @@ type CheckpointStats struct { DroppedSeries int DroppedSamples int DroppedTombstones int + DroppedExemplars int TotalSeries int // Processed series including dropped ones. TotalSamples int // Processed samples including dropped ones. TotalTombstones int // Processed tombstones including dropped ones. + TotalExemplars int // Processed exemplars including dropped ones. } // LastCheckpoint returns the directory name and index of the most recent checkpoint. 
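Both `Encoder.Exemplars` and `Decoder.Exemplars` above rely on the same scheme: store the first record's series reference and timestamp verbatim, then encode every record as signed varint deltas against that base, so small deltas cost only a byte or two. A self-contained round-trip sketch using `encoding/binary`; labels and values are omitted for brevity, and this is not the actual record codec:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type rec struct {
	Ref uint64
	T   int64
}

// encode writes a 16-byte base (ref, timestamp) followed by signed varint
// deltas for each record against that base.
func encode(recs []rec) []byte {
	if len(recs) == 0 {
		return nil
	}
	buf := make([]byte, 0, 16+2*binary.MaxVarintLen64*len(recs))
	tmp := make([]byte, binary.MaxVarintLen64)

	buf = append(buf, make([]byte, 16)...)
	binary.BigEndian.PutUint64(buf[0:8], recs[0].Ref)
	binary.BigEndian.PutUint64(buf[8:16], uint64(recs[0].T))

	for _, r := range recs {
		n := binary.PutVarint(tmp, int64(r.Ref)-int64(recs[0].Ref))
		buf = append(buf, tmp[:n]...)
		n = binary.PutVarint(tmp, r.T-recs[0].T)
		buf = append(buf, tmp[:n]...)
	}
	return buf
}

// decode reverses encode; no corruption handling, unlike the real decoder.
func decode(b []byte) []rec {
	baseRef := int64(binary.BigEndian.Uint64(b[0:8]))
	baseT := int64(binary.BigEndian.Uint64(b[8:16]))
	b = b[16:]

	var out []rec
	for len(b) > 0 {
		dref, n := binary.Varint(b)
		b = b[n:]
		dt, m := binary.Varint(b)
		b = b[m:]
		out = append(out, rec{Ref: uint64(baseRef + dref), T: baseT + dt})
	}
	return out
}

func main() {
	in := []rec{{Ref: 100, T: 12423423}, {Ref: 223, T: 12423500}, {Ref: 102, T: 12423999}}
	fmt.Println(decode(encode(in))) // [{100 12423423} {223 12423500} {102 12423999}]
}
```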
@@ -144,16 +146,17 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo r := NewReader(sgmReader) var ( - series []record.RefSeries - samples []record.RefSample - tstones []tombstones.Stone - dec record.Decoder - enc record.Encoder - buf []byte - recs [][]byte + series []record.RefSeries + samples []record.RefSample + tstones []tombstones.Stone + exemplars []record.RefExemplar + dec record.Decoder + enc record.Encoder + buf []byte + recs [][]byte ) for r.Next() { - series, samples, tstones = series[:0], samples[:0], tstones[:0] + series, samples, tstones, exemplars = series[:0], samples[:0], tstones[:0], exemplars[:0] // We don't reset the buffer since we batch up multiple records // before writing them to the checkpoint. @@ -219,6 +222,23 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo stats.TotalTombstones += len(tstones) stats.DroppedTombstones += len(tstones) - len(repl) + case record.Exemplars: + exemplars, err = dec.Exemplars(rec, exemplars) + if err != nil { + return nil, errors.Wrap(err, "decode exemplars") + } + // Drop irrelevant exemplars in place. + repl := exemplars[:0] + for _, e := range exemplars { + if e.T >= mint { + repl = append(repl, e) + } + } + if len(repl) > 0 { + buf = enc.Exemplars(repl, buf) + } + stats.TotalExemplars += len(exemplars) + stats.DroppedExemplars += len(exemplars) - len(repl) default: // Unknown record type, probably from a future Prometheus version. continue diff --git a/tsdb/wal/checkpoint_test.go b/tsdb/wal/checkpoint_test.go index 034d95917..c843aa12c 100644 --- a/tsdb/wal/checkpoint_test.go +++ b/tsdb/wal/checkpoint_test.go @@ -177,6 +177,11 @@ func TestCheckpoint(t *testing.T) { }, nil) require.NoError(t, w.Log(b)) + b = enc.Exemplars([]record.RefExemplar{ + {Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i))}, + }, nil) + require.NoError(t, w.Log(b)) + last += 100 } require.NoError(t, w.Close()) @@ -215,6 +220,12 @@ func TestCheckpoint(t *testing.T) { for _, s := range samples { require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp") } + case record.Exemplars: + exemplars, err := dec.Exemplars(rec, nil) + require.NoError(t, err) + for _, e := range exemplars { + require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp") + } } } require.NoError(t, r.Err()) diff --git a/tsdb/wal/watcher.go b/tsdb/wal/watcher.go index 8670567f1..5deb608c3 100644 --- a/tsdb/wal/watcher.go +++ b/tsdb/wal/watcher.go @@ -46,6 +46,7 @@ const ( // and it is left to the implementer to make sure they are safe. type WriteTo interface { Append([]record.RefSample) bool + AppendExemplars([]record.RefExemplar) bool StoreSeries([]record.RefSeries, int) // SeriesReset is called after reading a checkpoint to allow the deletion // of all series created in a segment lower than the argument. @@ -66,6 +67,7 @@ type Watcher struct { logger log.Logger walDir string lastCheckpoint string + sendExemplars bool metrics *WatcherMetrics readerMetrics *LiveReaderMetrics @@ -136,7 +138,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } // NewWatcher creates a new WAL watcher for a given WriteTo. 
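The `repl := exemplars[:0]` idiom in the checkpoint case above filters records in place, reusing the input slice's backing array instead of allocating a new one. A small sketch of the same idiom:

```go
package main

import "fmt"

type refExemplar struct {
	Ref uint64
	T   int64
}

// dropBefore keeps only records at or after mint, reusing the input slice's
// backing array, which is the trick the checkpoint code uses to rewrite
// records without allocating. Note the input slice is mutated.
func dropBefore(xs []refExemplar, mint int64) []refExemplar {
	repl := xs[:0]
	for _, e := range xs {
		if e.T >= mint {
			repl = append(repl, e)
		}
	}
	return repl
}

func main() {
	xs := []refExemplar{{1, 10}, {1, 99}, {2, 250}, {3, 400}}
	kept := dropBefore(xs, 100)
	fmt.Println(kept, "dropped:", 4-len(kept)) // [{2 250} {3 400}] dropped: 2
}
```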
-func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string) *Watcher { +func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string, sendExemplars bool) *Watcher { if logger == nil { logger = log.NewNopLogger() } @@ -147,8 +149,10 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge readerMetrics: readerMetrics, walDir: path.Join(walDir, "wal"), name: name, - quit: make(chan struct{}), - done: make(chan struct{}), + sendExemplars: sendExemplars, + + quit: make(chan struct{}), + done: make(chan struct{}), MaxSegment: -1, } @@ -462,10 +466,11 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { var ( - dec record.Decoder - series []record.RefSeries - samples []record.RefSample - send []record.RefSample + dec record.Decoder + series []record.RefSeries + samples []record.RefSample + send []record.RefSample + exemplars []record.RefExemplar ) for r.Next() && !isClosed(w.quit) { rec := r.Record() @@ -507,6 +512,23 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { send = send[:0] } + case record.Exemplars: + // Skip if experimental "exemplars over remote write" is not enabled. + if !w.sendExemplars { + break + } + // If we're not tailing a segment we can ignore any exemplars records we see. + // This speeds up replay of the WAL significantly. + if !tail { + break + } + exemplars, err := dec.Exemplars(rec, exemplars[:0]) + if err != nil { + w.recordDecodeFailsMetric.Inc() + return err + } + w.writer.AppendExemplars(exemplars) + case record.Tombstones: default: diff --git a/tsdb/wal/watcher_test.go b/tsdb/wal/watcher_test.go index 162752080..03234c5db 100644 --- a/tsdb/wal/watcher_test.go +++ b/tsdb/wal/watcher_test.go @@ -50,6 +50,7 @@ func retry(t *testing.T, interval time.Duration, n int, f func() bool) { type writeToMock struct { samplesAppended int + exemplarsAppended int seriesLock sync.Mutex seriesSegmentIndexes map[uint64]int } @@ -59,6 +60,11 @@ func (wtm *writeToMock) Append(s []record.RefSample) bool { return true } +func (wtm *writeToMock) AppendExemplars(e []record.RefExemplar) bool { + wtm.exemplarsAppended += len(e) + return true +} + func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) { wtm.seriesLock.Lock() defer wtm.seriesLock.Unlock() @@ -95,6 +101,7 @@ func TestTailSamples(t *testing.T) { pageSize := 32 * 1024 const seriesCount = 10 const samplesCount = 250 + const exemplarsCount = 25 for _, compress := range []bool{false, true} { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { now := time.Now() @@ -138,6 +145,19 @@ func TestTailSamples(t *testing.T) { }, nil) require.NoError(t, w.Log(sample)) } + + for j := 0; j < exemplarsCount; j++ { + inner := rand.Intn(ref + 1) + exemplar := enc.Exemplars([]record.RefExemplar{ + { + Ref: uint64(inner), + T: now.UnixNano() + 1, + V: float64(i), + Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", inner)), + }, + }, nil) + require.NoError(t, w.Log(exemplar)) + } } // Start read after checkpoint, no more data written. 
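The `record.Exemplars` case added to `readSegment` above forwards exemplars only when two conditions hold: the experimental flag is on, and the watcher is tailing the live segment (bulk replay skips them for speed). A toy sketch of that gating with simplified types and hypothetical names:

```go
package main

import "fmt"

type recordType int

const (
	samplesRec recordType = iota
	exemplarsRec
)

type watcher struct {
	sendExemplars bool
}

// handleRecord mirrors the gating in readSegment: exemplar records are
// forwarded only when the feature flag is on *and* we are tailing the live
// segment; during replay of old segments they are skipped entirely.
func (w *watcher) handleRecord(t recordType, tail bool) string {
	switch t {
	case exemplarsRec:
		if !w.sendExemplars || !tail {
			return "skipped"
		}
		return "forwarded to AppendExemplars"
	case samplesRec:
		return "forwarded to Append"
	default:
		return "ignored"
	}
}

func main() {
	w := &watcher{sendExemplars: true}
	fmt.Println(w.handleRecord(exemplarsRec, false)) // skipped (replaying old segments)
	fmt.Println(w.handleRecord(exemplarsRec, true))  // forwarded to AppendExemplars
}
```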
@@ -145,7 +165,7 @@ func TestTailSamples(t *testing.T) { require.NoError(t, err) wt := newWriteToMock() - watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true) watcher.SetStartTime(now) // Set the Watcher's metrics so they're not nil pointers. @@ -162,11 +182,13 @@ func TestTailSamples(t *testing.T) { expectedSeries := seriesCount expectedSamples := seriesCount * samplesCount + expectedExemplars := seriesCount * exemplarsCount retry(t, defaultRetryInterval, defaultRetries, func() bool { return wt.checkNumLabels() >= expectedSeries }) - require.Equal(t, expectedSeries, wt.checkNumLabels()) - require.Equal(t, expectedSamples, wt.samplesAppended) + require.Equal(t, expectedSeries, wt.checkNumLabels(), "did not receive the expected number of series") + require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples") + require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars") }) } } @@ -229,7 +251,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) { require.NoError(t, err) wt := newWriteToMock() - watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false) go watcher.Start() expected := seriesCount @@ -322,7 +344,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) { _, _, err = Segments(w.Dir()) require.NoError(t, err) wt := newWriteToMock() - watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false) go watcher.Start() expected := seriesCount * 2 @@ -392,7 +414,7 @@ func TestReadCheckpoint(t *testing.T) { require.NoError(t, err) wt := newWriteToMock() - watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false) go watcher.Start() expectedSeries := seriesCount @@ -465,7 +487,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) { } wt := newWriteToMock() - watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false) watcher.MaxSegment = -1 // Set the Watcher's metrics so they're not nil pointers. @@ -541,7 +563,7 @@ func TestCheckpointSeriesReset(t *testing.T) { require.NoError(t, err) wt := newWriteToMock() - watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) + watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false) watcher.MaxSegment = -1 go watcher.Start() From 996848ef40650b6d95a41cb33f1ecbedb7ce088e Mon Sep 17 00:00:00 2001 From: Hu Shuai Date: Sat, 8 May 2021 11:45:29 +0800 Subject: [PATCH 18/34] Fix golint issue Signed-off-by: Hu Shuai --- storage/interface.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/interface.go b/storage/interface.go index 6f65cf069..5583dcf02 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -124,7 +124,7 @@ type ExemplarQueryable interface { ExemplarQuerier(ctx context.Context) (ExemplarQuerier, error) } -// Querier provides reading access to time series data. +// ExemplarQuerier provides reading access to time series data. type ExemplarQuerier interface { // Select all the exemplars that match the matchers. // Within a single slice of matchers, it is an intersection. Between the slices, it is a union. 
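The `ExemplarQuerier` comment fixed above states the selection semantics: within a single slice of matchers it is an intersection, and between slices it is a union, i.e. an OR of ANDs. A small sketch of those semantics over plain label maps, using a hypothetical `matcher` type rather than the Prometheus one:

```go
package main

import "fmt"

type matcher struct {
	name, value string
}

func (m matcher) matches(lset map[string]string) bool {
	return lset[m.name] == m.value
}

// selects implements the documented semantics: every matcher inside one
// slice must match (intersection), and a series is selected if any slice
// matches it as a whole (union across slices).
func selects(lset map[string]string, groups [][]matcher) bool {
	for _, g := range groups {
		all := true
		for _, m := range g {
			if !m.matches(lset) {
				all = false
				break
			}
		}
		if all {
			return true
		}
	}
	return false
}

func main() {
	series := map[string]string{"__name__": "http_requests_total", "job": "api"}
	groups := [][]matcher{
		{{"__name__", "http_requests_total"}, {"job", "web"}}, // fails: job mismatch
		{{"job", "api"}},                                      // matches
	}
	fmt.Println(selects(series, groups)) // true
}
```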
From aedd4fa95cf304870da35ab6ee7376a38d8e0c49 Mon Sep 17 00:00:00 2001 From: Chris Marchbanks Date: Sun, 9 May 2021 16:10:58 -0600 Subject: [PATCH 19/34] Cut v2.27.0-rc.0 (#8793) Signed-off-by: Chris Marchbanks --- CHANGELOG.md | 21 +++++++++++++++++++++ VERSION | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b0e2462e..c6414e5b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +## 2.27.0-rc.0 / 2021-05-09 + +* [FEATURE] Promtool: Retroactive rule evaluation functionality. #7675 +* [FEATURE] Configuration: Environment variable expansion for external labels. Behind `--enable-feature=expand-external-labels` flag. #8649 +* [FEATURE] TSDB: Add a flag(`-storage.tsdb.max-chunk-size`) to control chunk allocation size for small Prometheus instances. #8478 +* [FEATURE] UI: Add a dark theme. #8604 +* [FEATURE] AWS Lightsail Discovery: Add AWS Lightsail Discovery. #8693 +* [FEATURE] Docker Discovery: Add Docker Service Discovery. #8629 +* [FEATURE] OAuth: Allow OAuth 2.0 to be used anywhere an HTTP client is used. #8761 +* [FEATURE] Remote Write: Send exemplars via remote write. Experimental and disabled by default. #8296 +* [ENHANCEMENT] Digital Ocean Discovery: Add `__meta_digitalocean_vpc` label. #8642 +* [ENHANCEMENT] Scaleway Discovery: Read Scaleway secret from a file. #8643 +* [ENHANCEMENT] Scrape: Add configurable limits for label size and count. #8777 +* [ENHANCEMENT] UI: Add 16w and 26w time range steps. #8656 +* [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682 +* [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659 +* [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790 +* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723 +* [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737 +* [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766 + ## 2.26.0 / 2021-03-31 Prometheus is now built and supporting Go 1.16 (#8544). This reverts the memory release pattern added in Go 1.12. This makes common RSS usage metrics showing more accurate number for actual memory used by Prometheus. You can read more details [here](https://www.bwplotka.dev/2019/golang-memory-monitoring/). 
diff --git a/VERSION b/VERSION index 7a25c70f9..d079cc4ab 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.26.0 +2.27.0-rc.0 From e313ffa8abf61b5e2df3e917e2f31a899c91b66c Mon Sep 17 00:00:00 2001 From: Julius Volz Date: Mon, 10 May 2021 23:33:26 +0200 Subject: [PATCH 20/34] Fix "instant selector vector" typo in error messages (#8800) Signed-off-by: Julius Volz --- promql/parser/parse.go | 4 ++-- promql/parser/parse_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6b81f35f9..db63e3d20 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -703,7 +703,7 @@ func (p *parser) addOffset(e Node, offset time.Duration) { orgoffsetp = &s.OriginalOffset endPosp = &s.EndPos default: - p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant selector vector or range vector selector or a subquery") + p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery") return } @@ -778,7 +778,7 @@ func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *Pos, bool) { timestampp = &s.Timestamp endPosp = &s.EndPos default: - p.addParseErrf(e.PositionRange(), "@ modifier must be preceded by an instant selector vector or range vector selector or a subquery") + p.addParseErrf(e.PositionRange(), "@ modifier must be preceded by an instant vector selector or range vector selector or a subquery") return nil, nil, nil, false } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index d5dcf9ed9..9b3a8c84b 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -528,7 +528,7 @@ var testExpr = []struct { }, { input: "1 offset 1d", fail: true, - errMsg: "1:1: parse error: offset modifier must be preceded by an instant selector vector or range vector selector or a subquery", + errMsg: "1:1: parse error: offset modifier must be preceded by an instant vector selector or range vector selector or a subquery", }, { input: "foo offset 1s offset 2s", fail: true, @@ -2270,7 +2270,7 @@ var testExpr = []struct { }, { input: `rate(some_metric[5m]) @ 1234`, fail: true, - errMsg: "1:1: parse error: @ modifier must be preceded by an instant selector vector or range vector selector or a subquery", + errMsg: "1:1: parse error: @ modifier must be preceded by an instant vector selector or range vector selector or a subquery", }, // Test function calls. { From 277bac622ac74240bbe6b75cbe96fe3472a7dc48 Mon Sep 17 00:00:00 2001 From: ide-rea <30512600+ide-rea@users.noreply.github.com> Date: Wed, 12 May 2021 22:47:05 +0800 Subject: [PATCH 21/34] validate exemplar labelSet length first (#8816) * ignore check exemplar labelSet length when append Signed-off-by: XiaoYu Zhang * validate exemplar labelSet length firstly Signed-off-by: XiaoYu Zhang --- tsdb/exemplar.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tsdb/exemplar.go b/tsdb/exemplar.go index 6332c1fe3..d5b7be57f 100644 --- a/tsdb/exemplar.go +++ b/tsdb/exemplar.go @@ -176,16 +176,11 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar. return ce.validateExemplar(seriesLabels, e, false) } -// Not thread safe. The append parameters tells us whether this is an external validation, or interal -// as a reuslt of an AddExemplar call, in which case we should update any relevant metrics. +// Not thread safe. 
The append parameters tells us whether this is an external validation, or internal +// as a result of an AddExemplar call, in which case we should update any relevant metrics. func (ce *CircularExemplarStorage) validateExemplar(l string, e exemplar.Exemplar, append bool) error { - idx, ok := ce.index[l] - if !ok { - return nil - } - // Exemplar label length does not include chars involved in text rendering such as quotes - // equals sign, or commas. See definiton of const ExemplarMaxLabelLength. + // equals sign, or commas. See definition of const ExemplarMaxLabelLength. labelSetLen := 0 for _, l := range e.Labels { labelSetLen += utf8.RuneCountInString(l.Name) @@ -196,6 +191,11 @@ func (ce *CircularExemplarStorage) validateExemplar(l string, e exemplar.Exempla } } + idx, ok := ce.index[l] + if !ok { + return nil + } + // Check for duplicate vs last stored exemplar for this series. // NB these are expected, and appending them is a no-op. if ce.exemplars[idx.newest].exemplar.Equals(e) { From 24c9b61221f7006e87cd62b9fe2901d43e19ed53 Mon Sep 17 00:00:00 2001 From: Chris Marchbanks Date: Wed, 12 May 2021 11:32:12 -0600 Subject: [PATCH 22/34] Release 2.27.0 (#8814) Signed-off-by: Chris Marchbanks --- CHANGELOG.md | 4 ++-- VERSION | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6414e5b5..0b0b79431 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,8 @@ -## 2.27.0-rc.0 / 2021-05-09 +## 2.27.0 / 2021-05-12 * [FEATURE] Promtool: Retroactive rule evaluation functionality. #7675 * [FEATURE] Configuration: Environment variable expansion for external labels. Behind `--enable-feature=expand-external-labels` flag. #8649 -* [FEATURE] TSDB: Add a flag(`-storage.tsdb.max-chunk-size`) to control chunk allocation size for small Prometheus instances. #8478 +* [FEATURE] TSDB: Add a flag(`--storage.tsdb.max-block-chunk-segment-size`) to control the max chunks file size of the blocks for small Prometheus instances. #8478 * [FEATURE] UI: Add a dark theme. #8604 * [FEATURE] AWS Lightsail Discovery: Add AWS Lightsail Discovery. #8693 * [FEATURE] Docker Discovery: Add Docker Service Discovery. 
#8629 diff --git a/VERSION b/VERSION index d079cc4ab..a5f3e61bd 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.27.0-rc.0 +2.27.0 From be9c870b06fd0d032e9c83fd247fbe681b1bef8e Mon Sep 17 00:00:00 2001 From: kcx2366425574 Date: Thu, 13 May 2021 17:26:44 +0800 Subject: [PATCH 23/34] remove the param that is not used Signed-off-by: kcx2366425574 --- tsdb/wal.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsdb/wal.go b/tsdb/wal.go index 8d49c8432..54b08f80c 100644 --- a/tsdb/wal.go +++ b/tsdb/wal.go @@ -64,7 +64,7 @@ type walMetrics struct { corruptions prometheus.Counter } -func newWalMetrics(wal *SegmentWAL, r prometheus.Registerer) *walMetrics { +func newWalMetrics(r prometheus.Registerer) *walMetrics { m := &walMetrics{} m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ @@ -192,7 +192,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, segmentSize: walSegmentSizeBytes, crc32: newCRC32(), } - w.metrics = newWalMetrics(w, r) + w.metrics = newWalMetrics(r) fns, err := sequenceFiles(w.dirFile.Name()) if err != nil { From 24869ff2d0ff8c057ab3fb6a336ec503fb9f7cad Mon Sep 17 00:00:00 2001 From: kjinan <2008kongxiangsheng@163.com> Date: Fri, 14 May 2021 09:34:11 +0800 Subject: [PATCH 24/34] typos correct Signed-off-by: kjinan <2008kongxiangsheng@163.com> --- tsdb/head_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2afbc2081..6ae7c5104 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -2042,7 +2042,7 @@ func TestHeadMintAfterTruncation(t *testing.T) { require.Equal(t, int64(4000), head.MinTime()) require.Equal(t, int64(4000), head.minValidTime.Load()) - // After truncation outside the appendable windown if the actual min time + // After truncation outside the appendable window if the actual min time // is in the appendable window then we should leave mint at the start of appendable window. require.NoError(t, head.Truncate(5000)) require.Equal(t, head.appendableMinValidTime(), head.MinTime()) From e1774b6f83a7554c91fa52bc420982dd0ac5a1e1 Mon Sep 17 00:00:00 2001 From: Julien Pivotto Date: Fri, 14 May 2021 22:32:15 +0200 Subject: [PATCH 25/34] Fix the computation of prometheus_sd_discovered_targets prometheus_sd_discovered_targets is wrongly calculated when there are multiple SD configurations in place. One discovery manager can have multiple groups coming from multiple service discoveries. When multiple service discovery configs are used, we do not compute the metric correctly, and instead just set the metric to one of the service discoveries. Signed-off-by: Julien Pivotto --- discovery/manager.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/discovery/manager.go b/discovery/manager.go index e50d2ee1a..a4208532e 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -279,15 +279,17 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { defer m.mtx.RUnlock() tSets := map[string][]*targetgroup.Group{} + n := map[string]int{} for pkey, tsets := range m.targets { - var n int for _, tg := range tsets { // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' // to signal that it needs to stop all scrape loops for this target set. 
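The fix described in the commit message above boils down to accumulating counts per scrape set across all providers before reporting, instead of resetting the counter inside the provider loop (which left the gauge reflecting only whichever provider was processed last). A simplified sketch of the corrected aggregation; the real structure is keyed by `poolKey`, which is flattened here:

```go
package main

import "fmt"

type group struct{ targets int }

// countTargets sums targets per scrape set across *all* providers first and
// only then reports, so no provider's count overwrites another's.
func countTargets(targets map[string]map[string][]group) map[string]int {
	n := map[string]int{}
	for setName, providers := range targets {
		for _, groups := range providers {
			for _, g := range groups {
				n[setName] += g.targets
			}
		}
	}
	return n
}

func main() {
	targets := map[string]map[string][]group{
		"prometheus": {
			"static": {{targets: 2}},
			"dns":    {{targets: 3}},
		},
	}
	fmt.Println(countTargets(targets)["prometheus"]) // 5, not 2 or 3
}
```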
 tSets[pkey.setName] = append(tSets[pkey.setName], tg)
- n += len(tg.Targets)
+ n[pkey.setName] += len(tg.Targets)
 }
- discoveredTargets.WithLabelValues(m.name, pkey.setName).Set(float64(n))
+ }
+ for setName, v := range n {
+ discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
 }
 return tSets
 }

From 0ffcddbee8dd12739daabbe7c262ee889798a09f Mon Sep 17 00:00:00 2001
From: Sandro
Date: Sun, 16 May 2021 05:22:50 +0200
Subject: [PATCH 26/34] Fix indentation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Sandro Jäckel
---
 docs/configuration/configuration.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 62208ab77..71e32f14e 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -2076,7 +2076,7 @@ kubernetes_sd_configs:
 
 # List of Lightsail service discovery configurations.
 lightsail_sd_configs:
-  [ - ... ]
+  [ - ... ]
 
 # List of Marathon service discovery configurations.
 marathon_sd_configs:

From db7f0bcec27bd8aeebad6b08ac849516efa9ae02 Mon Sep 17 00:00:00 2001
From: Julien Pivotto
Date: Tue, 18 May 2021 14:47:45 +0200
Subject: [PATCH 27/34] Merge pull request from GHSA-vx57-7f4q-fpc7

* Do not remove /new because it is not part of the route parameter (CVE-2021-29622)

Signed-off-by: Julien Pivotto

* Release 2.27.1

Signed-off-by: Julien Pivotto
---
 CHANGELOG.md | 12 ++++++++++++
 VERSION      | 2 +-
 web/web.go   | 2 +-
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b0b79431..b6b4c4071 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,15 @@
+## 2.27.1 / 2021-05-18
+
+This release contains a bug fix for a security issue in the API endpoint. An
+attacker can craft a special URL that redirects a user to any endpoint via an
+HTTP 302 response. See the [security advisory][GHSA-vx57-7f4q-fpc7] for more details.
+
+[GHSA-vx57-7f4q-fpc7]:https://github.com/prometheus/prometheus/security/advisories/GHSA-vx57-7f4q-fpc7
+
+This vulnerability has been reported by Aaron Devaney from MDSec.
+
+* [BUGFIX] SECURITY: Fix arbitrary redirects under the /new endpoint (CVE-2021-29622)
+
 ## 2.27.0 / 2021-05-12
 
 * [FEATURE] Promtool: Retroactive rule evaluation functionality. #7675

diff --git a/VERSION b/VERSION
index a5f3e61bd..f0465234b 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.27.0
+2.27.1

diff --git a/web/web.go b/web/web.go
index bdc5c44e5..374627a9e 100644
--- a/web/web.go
+++ b/web/web.go
@@ -354,7 +354,7 @@ func New(logger log.Logger, o *Options) *Handler {
 // Redirect the original React UI's path (under "/new") to its new path at the root.
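// (Editor's aside on the one-line fix below: as the commit message says, the
// "*path" route parameter never contains a leading "/new", so the extra
// strings.TrimPrefix could only ever eat the start of a user-controlled path.
// A hedged illustration with a made-up request for /new/newhttp://example.com:
//
//	p := "/newhttp://example.com"    // value of the "path" parameter
//	strings.TrimPrefix(p, "/new")    // "http://example.com", a scheme-bearing, off-site URL
//	path.Join(o.ExternalURL.Path, p) // "/newhttp:/example.com", which stays a local path
//
// Joining the untrimmed parameter keeps the 302 Location under the external
// URL path, closing the open redirect.)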
 router.Get("/new/*path", func(w http.ResponseWriter, r *http.Request) {
 p := route.Param(r.Context(), "path")
- http.Redirect(w, r, path.Join(o.ExternalURL.Path, strings.TrimPrefix(p, "/new"))+"?"+r.URL.RawQuery, http.StatusFound)
+ http.Redirect(w, r, path.Join(o.ExternalURL.Path, p)+"?"+r.URL.RawQuery, http.StatusFound)
 })
 
 router.Get("/classic/alerts", readyf(h.alerts))

From 7e7efaba32311b53846c4e931824b535b4331e7d Mon Sep 17 00:00:00 2001
From: Matthias Loibl
Date: Tue, 18 May 2021 18:37:16 +0200
Subject: [PATCH 28/34] storage: Split chunks if more than 120 samples (#8582)

* storage: Split chunks if more than 120 samples

Signed-off-by: Matthias Loibl

* storage: Don't set maxt which is overwritten right away

Signed-off-by: Matthias Loibl

* storage: Improve comments on merge_test

Signed-off-by: Matthias Loibl

* storage: Improve comments and move code closer to usage

Signed-off-by: Matthias Loibl

* tsdb/tsdbutil: Add comment for GenerateSamples

Signed-off-by: Matthias Loibl
---
 storage/merge_test.go   | 21 +++++++++++++++++++++
 storage/series.go       | 28 ++++++++++++++++++++++++++--
 tsdb/tsdbutil/chunks.go | 12 ++++++++++++
 3 files changed, 59 insertions(+), 2 deletions(-)

diff --git a/storage/merge_test.go b/storage/merge_test.go
index 2ed68879a..90f5725e1 100644
--- a/storage/merge_test.go
+++ b/storage/merge_test.go
@@ -465,6 +465,27 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
 []tsdbutil.Sample{sample{31, 31}, sample{35, 35}},
 ),
 },
+ {
+ name: "110 overlapping",
+ input: []ChunkSeries{
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110)), // [0 - 110)
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 50)), // [60 - 110)
+ },
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
+ tsdbutil.GenerateSamples(0, 110),
+ ),
+ },
+ {
+ name: "150 overlapping samples, split chunk",
+ input: []ChunkSeries{
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 90)), // [0 - 90)
+ NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 90)), // [60 - 150)
+ },
+ expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
+ tsdbutil.GenerateSamples(0, 120),
+ tsdbutil.GenerateSamples(120, 30),
+ ),
+ },
 } {
 t.Run(tc.name, func(t *testing.T) {
 merged := m(tc.input...)

diff --git a/storage/series.go b/storage/series.go
index c5440a45a..5f16d053c 100644
--- a/storage/series.go
+++ b/storage/series.go
@@ -217,7 +217,8 @@ type seriesToChunkEncoder struct {
 Series
 }
 
-// TODO(bwplotka): Currently encoder will just naively build one chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
+const seriesToChunkEncoderSplit = 120
+
 func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
 chk := chunkenc.NewXORChunk()
 app, err := chk.Appender()
@@ -227,8 +228,28 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
 
 mint := int64(math.MaxInt64)
 maxt := int64(math.MinInt64)
+ chks := []chunks.Meta{}
+
+ i := 0
 seriesIter := s.Series.Iterator()
 for seriesIter.Next() {
+ // Create a new chunk if too many samples in the current one.
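// (Editor's aside: 120 matches the TSDB head's long-standing target of roughly
// 120 samples per XOR chunk, so compacted chunks stay comparable to freshly
// appended ones. The added code below follows the usual "seal and restart"
// split pattern; a stripped-down sketch of the same idea, with an invented
// flush() helper standing in for the chks-append-plus-new-appender steps:
//
//	count := 0
//	for it.Next() {
//		if count >= seriesToChunkEncoderSplit {
//			flush() // seal the full chunk, start a fresh chunk and appender
//			count = 0
//		}
//		t, v := it.At()
//		app.Append(t, v)
//		count++
//	}
//	flush() // the final, possibly partial chunk must be sealed too
// )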
+ if i >= seriesToChunkEncoderSplit {
+ chks = append(chks, chunks.Meta{
+ MinTime: mint,
+ MaxTime: maxt,
+ Chunk: chk,
+ })
+ chk = chunkenc.NewXORChunk()
+ app, err = chk.Appender()
+ if err != nil {
+ return errChunksIterator{err: err}
+ }
+ mint = int64(math.MaxInt64)
+ // maxt is immediately overwritten below which is why setting it here won't make a difference.
+ i = 0
+ }
+
 t, v := seriesIter.At()
 app.Append(t, v)
 
@@ -236,16 +257,19 @@
 if mint == math.MaxInt64 {
 mint = t
 }
+ i++
 }
 if err := seriesIter.Err(); err != nil {
 return errChunksIterator{err: err}
 }
 
- return NewListChunkSeriesIterator(chunks.Meta{
+ chks = append(chks, chunks.Meta{
 MinTime: mint,
 MaxTime: maxt,
 Chunk: chk,
 })
+
+ return NewListChunkSeriesIterator(chks...)
 }
 
 type errChunksIterator struct {

diff --git a/tsdb/tsdbutil/chunks.go b/tsdb/tsdbutil/chunks.go
index 47760453e..5ae58b0a8 100644
--- a/tsdb/tsdbutil/chunks.go
+++ b/tsdb/tsdbutil/chunks.go
@@ -65,3 +65,15 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
 }
 return ChunkFromSamples(samples)
 }
+
+// GenerateSamples starting at start and counting up numSamples.
+func GenerateSamples(start int, numSamples int) []Sample {
+ samples := make([]Sample, 0, numSamples)
+ for i := start; i < start+numSamples; i++ {
+ samples = append(samples, sample{
+ t: int64(i),
+ v: float64(i),
+ })
+ }
+ return samples
+}

From 0a8912433a457b54a871df75e72afc3e45a31d5c Mon Sep 17 00:00:00 2001
From: Ben Ye
Date: Tue, 18 May 2021 12:38:37 -0400
Subject: [PATCH 29/34] allow compact series merger to be configurable (#8836)

Signed-off-by: yeya24
---
 tsdb/block_test.go   |  4 ++--
 tsdb/blockwriter.go  |  2 +-
 tsdb/compact.go      | 16 +++++++++++-----
 tsdb/compact_test.go | 12 ++++++------
 tsdb/db.go           |  3 ++-
 5 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/tsdb/block_test.go b/tsdb/block_test.go
index 3c2990cf3..c292cdcf5 100644
--- a/tsdb/block_test.go
+++ b/tsdb/block_test.go
@@ -323,7 +323,7 @@ func TestBlockSize(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
 
- c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil)
+ c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil)
 require.NoError(t, err)
 blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
 require.NoError(t, err)
@@ -426,7 +426,7 @@ func createBlock(tb testing.TB, dir string, series []storage.Series) string {
 }
 
 func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
- compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil)
+ compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil)
 require.NoError(tb, err)
 
 require.NoError(tb, os.MkdirAll(dir, 0777))

diff --git a/tsdb/blockwriter.go b/tsdb/blockwriter.go
index 2d19c7705..95d6f2854 100644
--- a/tsdb/blockwriter.go
+++ b/tsdb/blockwriter.go
@@ -100,7 +100,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
 nil,
 w.logger,
 []int64{w.blockSize},
- chunkenc.NewPool())
+ chunkenc.NewPool(), nil)
 if err != nil {
 return ulid.ULID{}, errors.Wrap(err, "create leveled compactor")
 }

diff --git a/tsdb/compact.go b/tsdb/compact.go
index 9befdc552..ca7baa191 100644
--- a/tsdb/compact.go
+++ b/tsdb/compact.go
@@ -82,6 +82,7 @@ type LeveledCompactor struct {
 chunkPool chunkenc.Pool
 ctx context.Context
 maxBlockChunkSegmentSize int64
+ mergeFunc storage.VerticalChunkSeriesMergeFunc
 }
 
 type compactorMetrics struct {
@@ -145,11 +146,11 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
 }
 
 // NewLeveledCompactor returns a LeveledCompactor.
-func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) {
- return NewLeveledCompactorWithChunkSize(ctx, r, l, ranges, pool, chunks.DefaultChunkSegmentSize)
+func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
+ return NewLeveledCompactorWithChunkSize(ctx, r, l, ranges, pool, chunks.DefaultChunkSegmentSize, mergeFunc)
 }
 
-func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64) (*LeveledCompactor, error) {
+func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
 if len(ranges) == 0 {
 return nil, errors.Errorf("at least one range must be provided")
 }
@@ -159,6 +160,9 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register
 if l == nil {
 l = log.NewNopLogger()
 }
+ if mergeFunc == nil {
+ mergeFunc = storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)
+ }
 return &LeveledCompactor{
 ranges: ranges,
 chunkPool: pool,
@@ -166,6 +170,7 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register
 metrics: newCompactorMetrics(r),
 ctx: ctx,
 maxBlockChunkSegmentSize: maxBlockChunkSegmentSize,
+ mergeFunc: mergeFunc,
 }, nil
 }
@@ -746,8 +751,9 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 set := sets[0]
 if len(sets) > 1 {
- // Merge series using compacting chunk series merger.
- set = storage.NewMergeChunkSeriesSet(sets, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge))
+ // Merge series using specified chunk series merger.
+ // The default one is the compacting series merger.
+ set = storage.NewMergeChunkSeriesSet(sets, c.mergeFunc)
 }
 
 // Iterate over all sorted chunk series.
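(Editor's note: with the merge function injectable, projects embedding the TSDB can swap in their own vertical merge strategy; offline deduplication is the obvious use case. A hedged usage sketch: NewLeveledCompactor's new signature is taken from the hunk above, while the setup around it is illustrative only:

	// Passing nil falls back to the compacting merger, per the patch;
	// any custom storage.VerticalChunkSeriesMergeFunc works here instead.
	mergeFunc := storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)
	c, err := tsdb.NewLeveledCompactor(
		context.Background(), nil, log.NewNopLogger(),
		tsdb.ExponentialBlockRanges(tsdb.DefaultOptions().MinBlockDuration, 3, 5),
		chunkenc.NewPool(),
		mergeFunc,
	)
	if err != nil {
		// handle "create leveled compactor" error
	}
	_ = c
)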
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index a08464216..255a4c14d 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -158,7 +158,7 @@ func TestNoPanicFor0Tombstones(t *testing.T) {
 },
 }
 
- c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil)
+ c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil, nil)
 require.NoError(t, err)
 
 c.plan(metas)
@@ -172,7 +172,7 @@ func TestLeveledCompactor_plan(t *testing.T) {
 180,
 540,
 1620,
- }, nil)
+ }, nil, nil)
 require.NoError(t, err)
 
 cases := map[string]struct {
@@ -381,7 +381,7 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
 240,
 720,
 2160,
- }, nil)
+ }, nil, nil)
 require.NoError(t, err)
 
 cases := []struct {
@@ -431,7 +431,7 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
 240,
 720,
 2160,
- }, nil)
+ }, nil, nil)
 require.NoError(t, err)
 
 tmpdir, err := ioutil.TempDir("", "test")
@@ -940,7 +940,7 @@ func TestCompaction_populateBlock(t *testing.T) {
 blocks = append(blocks, &mockBReader{ir: ir, cr: cr, mint: mint, maxt: maxt})
 }
 
- c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil)
+ c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil, nil)
 require.NoError(t, err)
 
 meta := &BlockMeta{
@@ -1065,7 +1065,7 @@ func BenchmarkCompaction(b *testing.B) {
 blockDirs = append(blockDirs, block.Dir())
 }
 
- c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil)
+ c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil)
 require.NoError(b, err)
 
 b.ResetTimer()

diff --git a/tsdb/db.go b/tsdb/db.go
index d6036f5e5..2bb56a85c 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -370,6 +370,7 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
 db.logger,
 ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
 chunkenc.NewPool(),
+ nil,
 )
 if err != nil {
 return errors.Wrap(err, "create leveled compactor")
@@ -648,7 +649,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 var err error
 
 ctx, cancel := context.WithCancel(context.Background())
- db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize)
+ db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil)
 if err != nil {
 cancel()
 return nil, errors.Wrap(err, "create leveled compactor")

From e6bb865ad44a8e145850f6541aabba9a044bfc94 Mon Sep 17 00:00:00 2001
From: Julius Volz
Date: Tue, 18 May 2021 21:43:38 +0200
Subject: [PATCH 30/34] Add upcoming release shepherds and new releases (#8842)

Signed-off-by: Julius Volz
---
 RELEASE.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/RELEASE.md b/RELEASE.md
index 916c5ff69..55c171d50 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -32,8 +32,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.25 | 2021-02-10 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.26 | 2021-03-24 | Bartek Plotka (GitHub: @bwplotka) |
 | v2.27 | 2021-05-05 | Chris Marchbanks (GitHub: @csmarchbanks) |
-| v2.28 | 2021-06-16 | **searching for volunteer** |
-| v2.29 | 2021-07-28 | **searching for volunteer** |
+| v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) |
+| v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) |
+| v2.30 | 2021-09-08 | **searching for volunteer** |
+| v2.31 | 2021-10-20 | **searching for volunteer** |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

From d95b0972505f61be0b23b4dfedb8b4816392491b Mon Sep 17 00:00:00 2001
From: Ben Ye
Date: Wed, 19 May 2021 08:01:35 -0400
Subject: [PATCH 31/34] expose seriesToChunkEncoder (#8845)

Signed-off-by: yeya24
---
 storage/merge.go  | 2 +-
 storage/series.go | 9 ++++++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/storage/merge.go b/storage/merge.go
index fca052ca1..81e45d55d 100644
--- a/storage/merge.go
+++ b/storage/merge.go
@@ -667,7 +667,7 @@ func (c *compactChunkIterator) Next() bool {
 }
 
 // Add last as it's not yet included in overlap. We operate on same series, so labels does not matter here.
- iter = (&seriesToChunkEncoder{Series: c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)}).Iterator()
+ iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)).Iterator()
 if !iter.Next() {
 if c.err = iter.Err(); c.err != nil {
 return false

diff --git a/storage/series.go b/storage/series.go
index 5f16d053c..d92b4fd18 100644
--- a/storage/series.go
+++ b/storage/series.go
@@ -204,9 +204,7 @@ func (c *seriesSetToChunkSet) Next() bool {
 }
 
 func (c *seriesSetToChunkSet) At() ChunkSeries {
- return &seriesToChunkEncoder{
- Series: c.SeriesSet.At(),
- }
+ return NewSeriesToChunkEncoder(c.SeriesSet.At())
 }
 
 func (c *seriesSetToChunkSet) Err() error {
@@ -219,6 +217,11 @@ type seriesToChunkEncoder struct {
 
 const seriesToChunkEncoderSplit = 120
 
+// NewSeriesToChunkEncoder encodes samples to chunks with 120 samples limit.
+func NewSeriesToChunkEncoder(series Series) ChunkSeries {
+ return &seriesToChunkEncoder{series}
+}
+
 func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
 chk := chunkenc.NewXORChunk()
 app, err := chk.Appender()

From e1370eecde7af30c8867089df3e93acee5e32df5 Mon Sep 17 00:00:00 2001
From: kjinan <2008kongxiangsheng@163.com>
Date: Thu, 20 May 2021 09:51:45 +0800
Subject: [PATCH 32/34] typos correct

Signed-off-by: kjinan <2008kongxiangsheng@163.com>
---
 tsdb/exemplar_test.go    | 2 +-
 tsdb/index/index_test.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tsdb/exemplar_test.go b/tsdb/exemplar_test.go
index cd6983ffb..55e6fd375 100644
--- a/tsdb/exemplar_test.go
+++ b/tsdb/exemplar_test.go
@@ -26,7 +26,7 @@ import (
 "github.com/prometheus/prometheus/storage"
 )
 
-// Tests the same exemplar cases as AddExemplar, but specfically the ValidateExemplar function so it can be relied on externally.
+// Tests the same exemplar cases as AddExemplar, but specifically the ValidateExemplar function so it can be relied on externally.
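// (Editor's aside, looking back at patch 31: with the encoder exported, any
// storage.Series can now be re-chunked without touching package internals.
// A hedged sketch built only on the NewSeriesToChunkEncoder signature shown
// above; "s" stands in for an arbitrary storage.Series:
//
//	cs := storage.NewSeriesToChunkEncoder(s)
//	it := cs.Iterator()
//	for it.Next() {
//		meta := it.At() // a chunks.Meta holding at most 120 samples
//		_ = meta
//	}
// )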
 func TestValidateExemplar(t *testing.T) {
 exs, err := NewCircularExemplarStorage(2, nil)
 require.NoError(t, err)

diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go
index 255536e6f..0e75d3758 100644
--- a/tsdb/index/index_test.go
+++ b/tsdb/index/index_test.go
@@ -217,7 +217,7 @@ func TestIndexRW_Postings(t *testing.T) {
 }
 require.NoError(t, p.Err())
 
- // The label incides are no longer used, so test them by hand here.
+ // The label indices are no longer used, so test them by hand here.
 labelIndices := map[string][]string{}
 require.NoError(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error {
 if len(key) != 1 {

From 446e5cc160d62fb0ed357453ea3e6db7ce4acbe9 Mon Sep 17 00:00:00 2001
From: Ikko Ashimine
Date: Thu, 20 May 2021 20:32:03 +0900
Subject: [PATCH 33/34] Fix typo in storage.md

mulitple -> multiple

Signed-off-by: Ikko Ashimine
---
 docs/storage.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/storage.md b/docs/storage.md
index 1b4f6ebca..6950487b8 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -186,4 +186,4 @@ The output of `promtool tsdb create-blocks-from rules` command is a directory th
 - All rules in the recording rule files will be evaluated.
 - If the `interval` is set in the recording rule file that will take priority over the `eval-interval` flag in the rule backfill command.
 - Alerts are currently ignored if they are in the recording rule file.
-- Rules in the same group cannot see the results of previous rules. Meaning that rules that refer to other rules being backfilled is not supported. A workaround is to backfill mulitple times and create the dependent data first (and move dependent data to the Prometheus server data dir so that it is accessible from the Prometheus API).
+- Rules in the same group cannot see the results of previous rules. Meaning that rules that refer to other rules being backfilled is not supported. A workaround is to backfill multiple times and create the dependent data first (and move dependent data to the Prometheus server data dir so that it is accessible from the Prometheus API).
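(Editor's note: the backfill workaround above is clearer with a concrete command. A hedged sketch of one `promtool tsdb create-blocks-from rules` pass; the timestamps, URL, and file name are illustrative, and `--eval-interval` is the flag the passage refers to:

    promtool tsdb create-blocks-from rules \
        --start 2021-05-01T00:00:00Z \
        --end 2021-05-14T00:00:00Z \
        --url http://localhost:9090 \
        --eval-interval 60s \
        base_rules.yaml

For dependent rules: run a first pass with the base rules, move the generated blocks into the server's data directory, then run a second pass with the rules that build on them.)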
From 1838068db5dfa9d2318b0f6dcd5eae690a5cfeb5 Mon Sep 17 00:00:00 2001
From: Augustin Husson
Date: Thu, 20 May 2021 23:00:15 +0200
Subject: [PATCH 34/34] bump codemirror-promql to v0.16.0 (#8856)

Signed-off-by: Augustin Husson
---
 web/ui/react-app/package.json |  2 +-
 web/ui/react-app/yarn.lock    | 18 +++++++++---------
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json
index d46aeaa5d..25d27cef8 100644
--- a/web/ui/react-app/package.json
+++ b/web/ui/react-app/package.json
@@ -21,7 +21,7 @@
 "@fortawesome/react-fontawesome": "^0.1.4",
 "@reach/router": "^1.2.1",
 "bootstrap": "^4.6.0",
- "codemirror-promql": "^0.15.0",
+ "codemirror-promql": "^0.16.0",
 "css.escape": "^1.5.1",
 "downshift": "^3.4.8",
 "enzyme-to-json": "^3.4.3",

diff --git a/web/ui/react-app/yarn.lock b/web/ui/react-app/yarn.lock
index 2212719c9..6a103d511 100644
--- a/web/ui/react-app/yarn.lock
+++ b/web/ui/react-app/yarn.lock
@@ -3460,12 +3460,12 @@ coa@^2.0.2:
 chalk "^2.4.1"
 q "^1.1.2"
 
-codemirror-promql@^0.15.0:
- version "0.15.0"
- resolved "https://registry.yarnpkg.com/codemirror-promql/-/codemirror-promql-0.15.0.tgz#dd6365ea5c2d18421d225cef12b74e64d8cab280"
- integrity sha512-u5f6Narj8Kx79AHMPlr8vogGUhinZfsZVT00R7wStquDA3kRTvxfEBYK77UtWNNJshxC1B3EZnHzXN2K9RzVXw==
+codemirror-promql@^0.16.0:
+ version "0.16.0"
+ resolved "https://registry.yarnpkg.com/codemirror-promql/-/codemirror-promql-0.16.0.tgz#c51ca8ce1772a228ebfdbc8431550b2385670d46"
+ integrity sha512-/npytsj103ccWSMWffxibDuS+0p8DFneB7eRdUQULNVOsF+QSif2fqohjz6C9OmM+r58hA5qmdBUdhtogqsvEQ==
 dependencies:
- lezer-promql "^0.18.0"
+ lezer-promql "^0.19.0"
 lru-cache "^6.0.0"
 
 collection-visit@^1.0.0:
@@ -7457,10 +7457,10 @@ levn@^0.3.0, levn@~0.3.0:
 prelude-ls "~1.1.2"
 type-check "~0.3.2"
 
-lezer-promql@^0.18.0:
- version "0.18.0"
- resolved "https://registry.yarnpkg.com/lezer-promql/-/lezer-promql-0.18.0.tgz#7eea8cb02f8203043560415d7a436e9903176ab2"
- integrity sha512-4ZQGyiU4JoL14rhtuAEmlSKHhu0dcBiLsqjF+RyouZNojUiLh6vyBFwrtPgAdD58s4j8+J21dOO/yUnaJvoGkw==
+lezer-promql@^0.19.0:
+ version "0.19.0"
+ resolved "https://registry.yarnpkg.com/lezer-promql/-/lezer-promql-0.19.0.tgz#0cedaead2d7b3700a25c09e5ae85568979806710"
+ integrity sha512-divgYjuKw4ESDWVCXg7FipH2dF4vq0aWTb0QCyIGz5NHTLx6H+tVC7IlMkQSqsK8t/6qhgxh6A9s6XrE4ZFFJQ==
 
 lezer-tree@^0.13.0, lezer-tree@^0.13.2:
 version "0.13.2"