mirror of https://github.com/prometheus/prometheus
Merge branch 'prometheus:main' into docs-deriv
commit 0a7baf8112
@@ -12,9 +12,9 @@ executors:
golang:
docker:
- image: quay.io/prometheus/golang-builder:1.17-base
golang_115:
golang_oldest:
docker:
- image: quay.io/prometheus/golang-builder:1.15-base
- image: quay.io/prometheus/golang-builder:1.16-base

jobs:
test_go:

@@ -89,10 +89,11 @@ jobs:
GOGC: "20"
GOOPTS: "-p 2"

test_tsdb_go115:
executor: golang_115
test_golang_oldest:
executor: golang_oldest
steps:
- checkout
- run: make build
- run: go test ./tsdb/...
- run: go test ./tsdb/ -test.tsdb-isolation=false

@@ -128,7 +129,7 @@ workflows:
filters:
tags:
only: /.*/
- test_tsdb_go115:
- test_golang_oldest:
filters:
tags:
only: /.*/
@@ -3,17 +3,16 @@ updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
interval: "monthly"
- package-ecosystem: "npm"
directory: "/web/ui"
open-pull-requests-limit: 0
schedule:
interval: "weekly"
interval: "monthly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
interval: "monthly"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
interval: "monthly"
CHANGELOG.md
@@ -1,3 +1,31 @@
## 2.33.0-rc.0 / 2022-01-12

* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
* [CHANGE] Web: Promote remote-write-receiver to stable. #10119
* [FEATURE] Config: Add `stripPort` template function. #10002
* [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
* [FEATURE] SD: Enable target discovery in own K8s namespace. #9881
* [FEATURE] SD: Add provider ID label in K8s SD. #9603
* [FEATURE] Web: Add limit field to the rules API. #10152
* [ENHANCEMENT] Remote-write: Avoid allocations by buffering concrete structs instead of interfaces. #9934
* [ENHANCEMENT] Remote-write: Log time series details for out-of-order samples in remote write receiver. #9894
* [ENHANCEMENT] Remote-write: Shard up more when backlogged. #9274
* [ENHANCEMENT] TSDB: Use simpler map key to improve exemplar ingest performance. #10111
* [ENHANCEMENT] TSDB: Avoid allocations when popping from the intersected postings heap. #10092
* [ENHANCEMENT] TSDB: Make chunk writing non-blocking, avoiding latency spikes in remote-write. #10051
* [ENHANCEMENT] TSDB: Improve label matching performance. #9907
* [ENHANCEMENT] UI: Optimize the service discovery page and add a search bar. #10131
* [ENHANCEMENT] UI: Optimize the target page and add a search bar. #10103
* [BUGFIX] Promtool: Make exit codes more consistent. #9861
* [BUGFIX] Promtool: Fix flakiness of rule testing. #8818
* [BUGFIX] Remote-write: Update `prometheus_remote_storage_queue_highest_sent_timestamp_seconds` metric when write irrecoverably fails. #10102
* [BUGFIX] Storage: Avoid panic in `BufferedSeriesIterator`. #9945
* [BUGFIX] TSDB: CompactBlockMetas should produce correct mint/maxt for overlapping blocks. #10108
* [BUGFIX] TSDB: Fix logging of exemplar storage size. #9938
* [BUGFIX] UI: Fix overlapping click targets for the alert state checkboxes. #10136
* [BUGFIX] UI: Fix _Unhealthy_ filter on target page to actually display only _Unhealthy_ targets. #10103
* [BUGFIX] UI: Fix autocompletion when expression is empty. #10053

## 2.32.1 / 2021-12-17

* [BUGFIX] Scrape: Fix reporting metrics when sample limit is reached during the report. #9996

@@ -13,7 +41,7 @@ Enable with `--enable-feature=agent`.

Learn more about the Prometheus Agent in our [blog post](https://prometheus.io/blog/2021/11/16/agent/).

* [CHANGE] remote-write: Change default max retry time from 100ms to 5 seconds. #9634
* [CHANGE] Remote-write: Change default max retry time from 100ms to 5 seconds. #9634
* [FEATURE] Agent: New mode of operation optimized for remote-write only scenarios, without local storage. Enable with `--enable-feature=agent`. #8785 #9851 #9664 #9939 #9941 #9943
* [FEATURE] Promtool: Add `promtool check service-discovery` command. #8970
* [FEATURE] UI: Add search in metrics dropdown. #9629
@@ -56,7 +56,7 @@ Prometheus will now be reachable at http://localhost:9090/.
### Building from source

To build Prometheus from source code, You need:
* Go [version 1.14 or greater](https://golang.org/doc/install).
* Go [version 1.16 or greater](https://golang.org/doc/install).
* NodeJS [version 16 or greater](https://nodejs.org/).
* npm [version 7 or greater](https://www.npmjs.com/).
RELEASE.md
@@ -37,7 +37,9 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.30 | 2021-09-08 | Ganesh Vernekar (GitHub: @codesome) |
| v2.31 | 2021-10-20 | Julien Pivotto (GitHub: @roidelapluie) |
| v2.32 | 2021-12-01 | Julius Volz (GitHub: @juliusv) |
| v2.33 | 2022-01-12 | **searching for volunteer** |
| v2.33 | 2022-01-12 | Björn Rabenstein (GitHub: @beorn7) |
| v2.34 | 2022-02-23 | Chris Marchbanks (GitHub: @csmarchbanks) |
| v2.35 | 2022-04-06 | **searching for volunteer** |

If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

@@ -70,7 +72,7 @@ If a bug fix got accidentally merged into main after non-bug-fix changes in main

Maintaining the release branches for older minor releases happens on a best effort basis.

### 0. Updating dependencies
### 0. Updating dependencies and promoting/demoting experimental features

A few days before a major or minor release, consider updating the dependencies.

@@ -85,6 +87,10 @@ you can skip the dependency update or only update select dependencies. In such a
case, you have to create an issue or pull request in the GitHub project for
later follow-up.

This is also a good time to consider any experimental features and feature
flags for promotion to stable or for deprecation or ultimately removal. Do any
of these in pull requests, one per feature.

#### Updating Go dependencies

```

@@ -155,3 +161,5 @@ For release candidate versions (`v2.16.0-rc.0`), run the benchmark for 3 days us
If the release has happened in the latest release branch, merge the changes into main.

Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration.

Finally, in case there is no release shepherd listed for the next release yet, find a volunteer.
@@ -149,8 +149,6 @@ type flagConfig struct {
featureList []string
// These options are extracted from featureList
// for ease of use.
enablePromQLAtModifier bool
enablePromQLNegativeOffset bool
enableExpandExternalLabels bool
enableNewSDManager bool

@@ -166,15 +164,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
opts := strings.Split(f, ",")
for _, o := range opts {
switch o {
case "promql-at-modifier":
c.enablePromQLAtModifier = true
level.Info(logger).Log("msg", "Experimental promql-at-modifier enabled")
case "promql-negative-offset":
c.enablePromQLNegativeOffset = true
level.Info(logger).Log("msg", "Experimental promql-negative-offset enabled")
case "remote-write-receiver":
c.web.RemoteWriteReceiver = true
level.Info(logger).Log("msg", "Experimental remote-write-receiver enabled")
c.web.EnableRemoteWriteReceiver = true
level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
case "expand-external-labels":
c.enableExpandExternalLabels = true
level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")

@@ -195,6 +187,8 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental agent mode enabled.")
case "":
continue
case "promql-at-modifier", "promql-negative-offset":
level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
}

@@ -263,6 +257,9 @@ func main() {
a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
Default("false").BoolVar(&cfg.web.EnableAdminAPI)

a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)

a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)

@@ -299,7 +296,7 @@ func main() {
serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
SetValue(&newFlagRetentionDuration)

serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\". Based on powers-of-2, so 1KB is 1024B.").
BytesVar(&cfg.tsdb.MaxBytes)

serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").

@@ -381,7 +378,7 @@ func main() {
serverOnlyFlag(a, "query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
Default("50000000").IntVar(&cfg.queryMaxSamples)

a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)

promlogflag.AddFlags(a, &cfg.promlogConfig)

@@ -567,8 +564,10 @@ func main() {
ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")),
LookbackDelta: time.Duration(cfg.lookbackDelta),
NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
EnableAtModifier: cfg.enablePromQLAtModifier,
EnableNegativeOffset: cfg.enablePromQLNegativeOffset,
// EnableAtModifier and EnableNegativeOffset have to be
// always on for regular PromQL as of Prometheus v2.33.
EnableAtModifier: true,
EnableNegativeOffset: true,
}

queryEngine = promql.NewEngine(opts)
@@ -18,6 +18,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"net/http"

@@ -27,6 +28,7 @@ import (
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"

"github.com/go-kit/log"

@@ -43,6 +45,9 @@ import (
"gopkg.in/alecthomas/kingpin.v2"
yaml "gopkg.in/yaml.v2"

dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"

"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/file"

@@ -56,6 +61,13 @@ import (
"github.com/prometheus/prometheus/scrape"
)

const (
successExitCode = 0
failureExitCode = 1
// Exit code 3 is used for "one or more lint issues detected".
lintErrExitCode = 3
)

func main() {
app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout)
app.Version(version.Print("promtool"))

@@ -88,6 +100,7 @@ func main() {
).Required().ExistingFiles()

checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool()

queryCmd := app.Command("query", "Run query against a Prometheus server.")
@@ -190,17 +203,14 @@ func main() {
p = &promqlPrinter{}
}

var queryOpts promql.LazyLoaderOpts
for _, f := range *featureList {
opts := strings.Split(f, ",")
for _, o := range opts {
switch o {
case "promql-at-modifier":
queryOpts.EnableAtModifier = true
case "promql-negative-offset":
queryOpts.EnableNegativeOffset = true
case "":
continue
case "promql-at-modifier", "promql-negative-offset":
fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o)
default:
fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o)
}

@@ -221,7 +231,7 @@ func main() {
os.Exit(CheckRules(*ruleFiles...))

case checkMetricsCmd.FullCommand():
os.Exit(CheckMetrics())
os.Exit(CheckMetrics(*checkMetricsExtended))

case queryInstantCmd.FullCommand():
os.Exit(QueryInstant(*queryInstantServer, *queryInstantExpr, *queryInstantTime, p))

@@ -245,7 +255,13 @@ func main() {
os.Exit(QueryLabels(*queryLabelsServer, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))

case testRulesCmd.FullCommand():
os.Exit(RulesUnitTest(queryOpts, *testRulesFiles...))
os.Exit(RulesUnitTest(
promql.LazyLoaderOpts{
EnableAtModifier: true,
EnableNegativeOffset: true,
},
*testRulesFiles...),
)

case tsdbBenchWriteCmd.FullCommand():
os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))
@@ -298,9 +314,9 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, files ...string) int {
}
}
if failed {
return 1
return failureExitCode
}
return 0
return successExitCode
}

// CheckWebConfig validates web configuration files.

@@ -316,9 +332,9 @@ func CheckWebConfig(files ...string) int {
fmt.Fprintln(os.Stderr, f, "SUCCESS")
}
if failed {
return 1
return failureExitCode
}
return 0
return successExitCode
}

func checkFileExists(fn string) error {

@@ -521,9 +537,9 @@ func CheckRules(files ...string) int {
fmt.Println()
}
if failed {
return 1
return failureExitCode
}
return 0
return successExitCode
}

func checkRules(filename string) (int, []error) {

@@ -531,7 +547,7 @@ func checkRules(filename string) (int, []error) {

rgs, errs := rulefmt.ParseFile(filename)
if errs != nil {
return 0, errs
return successExitCode, errs
}

numRules := 0

@@ -622,12 +638,14 @@ $ curl -s http://localhost:9090/metrics | promtool check metrics
`)

// CheckMetrics performs a linting pass on input metrics.
func CheckMetrics() int {
l := promlint.New(os.Stdin)
func CheckMetrics(extended bool) int {
var buf bytes.Buffer
tee := io.TeeReader(os.Stdin, &buf)
l := promlint.New(tee)
problems, err := l.Lint()
if err != nil {
fmt.Fprintln(os.Stderr, "error while linting:", err)
return 1
return failureExitCode
}

for _, p := range problems {
@@ -635,10 +653,71 @@ func CheckMetrics() int {
}

if len(problems) > 0 {
return 3
return lintErrExitCode
}

return 0
if extended {
stats, total, err := checkMetricsExtended(&buf)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return failureExitCode
}
w := tabwriter.NewWriter(os.Stdout, 4, 4, 4, ' ', tabwriter.TabIndent)
fmt.Fprintf(w, "Metric\tCardinality\tPercentage\t\n")
for _, stat := range stats {
fmt.Fprintf(w, "%s\t%d\t%.2f%%\t\n", stat.name, stat.cardinality, stat.percentage*100)
}
fmt.Fprintf(w, "Total\t%d\t%.f%%\t\n", total, 100.)
w.Flush()
}

return successExitCode
}

type metricStat struct {
name string
cardinality int
percentage float64
}

func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
p := expfmt.TextParser{}
metricFamilies, err := p.TextToMetricFamilies(r)
if err != nil {
return nil, 0, fmt.Errorf("error while parsing text to metric families: %w", err)
}

var total int
stats := make([]metricStat, 0, len(metricFamilies))
for _, mf := range metricFamilies {
var cardinality int
switch mf.GetType() {
case dto.MetricType_COUNTER, dto.MetricType_GAUGE, dto.MetricType_UNTYPED:
cardinality = len(mf.Metric)
case dto.MetricType_HISTOGRAM:
// Histogram metrics includes sum, count, buckets.
buckets := len(mf.Metric[0].Histogram.Bucket)
cardinality = len(mf.Metric) * (2 + buckets)
case dto.MetricType_SUMMARY:
// Summary metrics includes sum, count, quantiles.
quantiles := len(mf.Metric[0].Summary.Quantile)
cardinality = len(mf.Metric) * (2 + quantiles)
default:
cardinality = len(mf.Metric)
}
stats = append(stats, metricStat{name: mf.GetName(), cardinality: cardinality})
total += cardinality
}

for i := range stats {
stats[i].percentage = float64(stats[i].cardinality) / float64(total)
}

sort.SliceStable(stats, func(i, j int) bool {
return stats[i].cardinality > stats[j].cardinality
})

return stats, total, nil
}

// QueryInstant performs an instant query against a Prometheus server.
@@ -654,7 +733,7 @@ func QueryInstant(url *url.URL, query, evalTime string, p printer) int {
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return 1
return failureExitCode
}

eTime := time.Now()

@@ -662,7 +741,7 @@ func QueryInstant(url *url.URL, query, evalTime string, p printer) int {
eTime, err = parseTime(evalTime)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
return 1
return failureExitCode
}
}

@@ -678,7 +757,7 @@ func QueryInstant(url *url.URL, query, evalTime string, p printer) int {

p.printValue(val)

return 0
return successExitCode
}

// QueryRange performs a range query against a Prometheus server.

@@ -703,7 +782,7 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return 1
return failureExitCode
}

var stime, etime time.Time

@@ -714,7 +793,7 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
etime, err = parseTime(end)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing end time:", err)
return 1
return failureExitCode
}
}

@@ -724,13 +803,13 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
stime, err = parseTime(start)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing start time:", err)
return 1
return failureExitCode
}
}

if !stime.Before(etime) {
fmt.Fprintln(os.Stderr, "start time is not before end time")
return 1
return failureExitCode
}

if step == 0 {

@@ -751,7 +830,7 @@ func QueryRange(url *url.URL, headers map[string]string, query, start, end strin
}

p.printValue(val)
return 0
return successExitCode
}

// QuerySeries queries for a series against a Prometheus server.

@@ -767,13 +846,13 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return 1
return failureExitCode
}

stime, etime, err := parseStartTimeAndEndTime(start, end)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
return failureExitCode
}

// Run query against client.

@@ -787,7 +866,7 @@ func QuerySeries(url *url.URL, matchers []string, start, end string, p printer)
}

p.printSeries(val)
return 0
return successExitCode
}

// QueryLabels queries for label values against a Prometheus server.

@@ -803,13 +882,13 @@ func QueryLabels(url *url.URL, name, start, end string, p printer) int {
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return 1
return failureExitCode
}

stime, etime, err := parseStartTimeAndEndTime(start, end)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
return failureExitCode
}

// Run query against client.

@@ -826,7 +905,7 @@ func QueryLabels(url *url.URL, name, start, end string, p printer) int {
}

p.printLabelValues(val)
return 0
return successExitCode
}

func handleAPIError(err error) int {

@@ -837,7 +916,7 @@ func handleAPIError(err error) int {
fmt.Fprintln(os.Stderr, "query error:", err)
}

return 1
return failureExitCode
}

func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {

@@ -929,9 +1008,9 @@ func debugPprof(url string) int {
endPointGroups: pprofEndpoints,
}); err != nil {
fmt.Fprintln(os.Stderr, "error completing debug command:", err)
return 1
return failureExitCode
}
return 0
return successExitCode
}

func debugMetrics(url string) int {

@@ -941,9 +1020,9 @@ func debugMetrics(url string) int {
endPointGroups: metricsEndpoints,
}); err != nil {
fmt.Fprintln(os.Stderr, "error completing debug command:", err)
return 1
return failureExitCode
}
return 0
return successExitCode
}

func debugAll(url string) int {

@@ -953,9 +1032,9 @@ func debugAll(url string) int {
endPointGroups: allEndpoints,
}); err != nil {
fmt.Fprintln(os.Stderr, "error completing debug command:", err)
return 1
return failureExitCode
}
return 0
return successExitCode
}

type printer interface {
@@ -18,6 +18,7 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"os"
"runtime"
"strings"
"testing"

@@ -322,3 +323,39 @@ func TestAuthorizationConfig(t *testing.T) {
})
}
}

func TestCheckMetricsExtended(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Skipping on windows")
}

f, err := os.Open("testdata/metrics-test.prom")
require.NoError(t, err)
defer f.Close()

stats, total, err := checkMetricsExtended(f)
require.NoError(t, err)
require.Equal(t, 27, total)
require.Equal(t, []metricStat{
{
name: "prometheus_tsdb_compaction_chunk_size_bytes",
cardinality: 15,
percentage: float64(15) / float64(27),
},
{
name: "go_gc_duration_seconds",
cardinality: 7,
percentage: float64(7) / float64(27),
},
{
name: "net_conntrack_dialer_conn_attempted_total",
cardinality: 4,
percentage: float64(4) / float64(27),
},
{
name: "go_info",
cardinality: 1,
percentage: float64(1) / float64(27),
},
}, stats)
}
@@ -43,7 +43,7 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
if err != nil {
fmt.Fprintln(os.Stderr, "Cannot load config", err)
return 2
return failureExitCode
}

var scrapeConfig *config.ScrapeConfig

@@ -63,7 +63,7 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
for _, job := range jobs {
fmt.Fprintf(os.Stderr, "\t%s\n", job)
}
return 1
return failureExitCode
}

targetGroupChan := make(chan []*targetgroup.Group)

@@ -74,7 +74,7 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger})
if err != nil {
fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
return 2
return failureExitCode
}
go d.Run(ctx, targetGroupChan)
}

@@ -100,11 +100,11 @@ outerLoop:
res, err := json.MarshalIndent(results, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "Could not marshal result json: %s", err)
return 2
return failureExitCode
}

fmt.Printf("%s", res)
return 0
return successExitCode
}

func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
@@ -0,0 +1,34 @@
# Evaluate once every 100d to avoid this taking too long.
evaluation_interval: 100d

rule_files:
- rules.yml

tests:
- interval: 100d
input_series:
- series: test
# Max time in time.Duration is 106751d from 1970 (2^63/10^9), i.e. 2262.
# We use the nearest 100 days to that to ensure the unit tests can fully
# cover the expected range.
values: '0+1x1067'

promql_expr_test:
- expr: timestamp(test)
eval_time: 0m
exp_samples:
- value: 0
- expr: test
eval_time: 100d # one evaluation_interval.
exp_samples:
- labels: test
value: 1
- expr: timestamp(test)
eval_time: 106700d
exp_samples:
- value: 9218880000 # 106700d -> seconds.
- expr: fixed_data
eval_time: 106700d
exp_samples:
- labels: fixed_data
value: 1
@@ -0,0 +1,35 @@
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 2.391e-05
go_gc_duration_seconds{quantile="0.25"} 9.4402e-05
go_gc_duration_seconds{quantile="0.5"} 0.000118953
go_gc_duration_seconds{quantile="0.75"} 0.000145884
go_gc_duration_seconds{quantile="1"} 0.005201208
go_gc_duration_seconds_sum 0.036134048
go_gc_duration_seconds_count 232
# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 662
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 1460
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2266
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3958
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 4861
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 5721
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 10493
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 12464
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 13254
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 13699
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 13806
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 13852
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 13867
prometheus_tsdb_compaction_chunk_size_bytes_sum 3.886707e+06
prometheus_tsdb_compaction_chunk_size_bytes_count 13867
# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_attempted_total counter
net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 5210
net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 21
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 21
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.17"} 1
@@ -56,9 +56,9 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, files ...string) int {
fmt.Println()
}
if failed {
return 1
return failureExitCode
}
return 0
return successExitCode
}

func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {

@@ -36,6 +36,13 @@ func TestRulesUnitTest(t *testing.T) {
},
want: 0,
},
{
name: "Long evaluation interval",
args: args{
files: []string{"./testdata/long-period.yml"},
},
want: 0,
},
{
name: "Bad input series",
args: args{
@@ -668,13 +668,16 @@ func TestTargetUpdatesOrder(t *testing.T) {
discoveryManager.updatert = 100 * time.Millisecond

var totalUpdatesCount int
provUpdates := make(chan []*targetgroup.Group)
for _, up := range tc.updates {
go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
if len(up) > 0 {
totalUpdatesCount += len(up)
}
}
provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount)

for _, up := range tc.updates {
go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
}

for x := 0; x < totalUpdatesCount; x++ {
select {
@@ -11,13 +11,6 @@ Their behaviour can change in future releases which will be communicated via the
You can enable them using the `--enable-feature` flag with a comma separated list of features.
They may be enabled by default in future versions.

## `@` Modifier in PromQL

`--enable-feature=promql-at-modifier`

The `@` modifier lets you specify the evaluation time for instant vector selectors,
range vector selectors, and subqueries. More details can be found [here](querying/basics.md#modifier).

## Expand environment variables in external labels

`--enable-feature=expand-external-labels`

@@ -26,26 +19,14 @@ Replace `${var}` or `$var` in the [`external_labels`](configuration/configuratio
values according to the values of the current environment variables. References
to undefined variables are replaced by the empty string.

## Negative offset in PromQL

This negative offset is disabled by default since it breaks the invariant
that PromQL does not look ahead of the evaluation time for samples.

`--enable-feature=promql-negative-offset`

In contrast to the positive offset modifier, the negative offset modifier lets
one shift a vector selector into the future. An example in which one may want
to use a negative offset is reviewing past data and making temporal comparisons
with more recent data.

More details can be found [here](querying/basics.md#offset-modifier).

## Remote Write Receiver

`--enable-feature=remote-write-receiver`

The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview).

Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus.

## Exemplars storage

`--enable-feature=exemplar-storage`
@@ -81,7 +81,7 @@ navigating to its metrics endpoint:

Let us explore data that Prometheus has collected about itself. To
use Prometheus's built-in expression browser, navigate to
http://localhost:9090/graph and choose the "Console" view within the "Graph" tab.
http://localhost:9090/graph and choose the "Table" view within the "Graph" tab.

As you can gather from [localhost:9090/metrics](http://localhost:9090/metrics),
one metric that Prometheus exports about itself is named
@@ -620,6 +620,7 @@ $ curl http://localhost:9090/api/v1/rules
],
"file": "/rules.yaml",
"interval": 60,
"limit": 0,
"name": "example"
}
]

@@ -1145,3 +1146,17 @@ $ curl -XPOST http://localhost:9090/api/v1/admin/tsdb/clean_tombstones
```

*New in v2.1 and supports PUT from v2.9*

## Remote Write Receiver

Prometheus can be configured as a receiver for the Prometheus remote write
protocol. This is not considered an efficient way of ingesting samples. Use it
with caution for specific low-volume use cases. It is not suitable for
replacing the ingestion via scraping and turning Prometheus into a push-based
metrics collection system.

Enable the remote write receiver by setting
`--web.enable-remote-write-receiver`. When enabled, the remote write receiver
endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview).

*New in v2.33*
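Editor's note (not part of this commit): the documentation above describes the receiver but not a client. Below is a minimal, illustrative Go sketch of what a push to the `/api/v1/write` endpoint might look like, assuming a local Prometheus started with `--web.enable-remote-write-receiver` and the remote-write 1.0 wire format (snappy-compressed protobuf). The metric name, label values, and URL are placeholders.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// Build a WriteRequest carrying one sample of a hypothetical series
	// example_metric{job="example"}.
	req := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "example_metric"},
				{Name: "job", Value: "example"},
			},
			Samples: []prompb.Sample{
				{Value: 42, Timestamp: time.Now().UnixNano() / int64(time.Millisecond)},
			},
		}},
	}

	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	// The remote-write protocol sends snappy-compressed protobuf bodies.
	body := snappy.Encode(nil, data)

	httpReq, err := http.NewRequest(http.MethodPost, "http://localhost:9090/api/v1/write", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	httpReq.Header.Set("Content-Encoding", "snappy")
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("remote write status:", resp.Status)
}
```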
@@ -104,14 +104,15 @@ against regular expressions. The following label matching operators exist:
* `=~`: Select labels that regex-match the provided string.
* `!~`: Select labels that do not regex-match the provided string.

Regex matches are fully anchored. A match of `env=~"foo"` is treated as `env=~"^foo$"`.

For example, this selects all `http_requests_total` time series for `staging`,
`testing`, and `development` environments and HTTP methods other than `GET`.

http_requests_total{environment=~"staging|testing|development",method!="GET"}

Label matchers that match empty label values also select all time series that
do not have the specific label set at all. Regex-matches are fully anchored. It
is possible to have multiple matchers for the same label name.
do not have the specific label set at all. It is possible to have multiple matchers for the same label name.

Vector selectors must either specify a name or at least one label matcher
that does not match the empty string. The following expression is illegal:

@@ -209,9 +210,7 @@ can be specified:

rate(http_requests_total[5m] offset -1w)

This feature is enabled by setting `--enable-feature=promql-negative-offset`
flag. See [feature flags](../feature_flags.md) for more details about
this flag.
Note that this allows a query to look ahead of its evaluation time.

### @ modifier

@@ -249,10 +248,6 @@ These 2 queries will produce the same result.
# offset before @
http_requests_total offset 5m @ 1609746000

This modifier is disabled by default since it breaks the invariant that PromQL
does not look ahead of the evaluation time for samples. It can be enabled by setting
`--enable-feature=promql-at-modifier` flag. See [feature flags](../feature_flags.md) for more details about this flag.

Additionally, `start()` and `end()` can also be used as values for the `@` modifier as special values.

For a range query, they resolve to the start and end of the range query respectively and remain the same for all steps.

@@ -262,6 +257,8 @@ For an instant query, `start()` and `end()` both resolve to the evaluation time.
http_requests_total @ start()
rate(http_requests_total[5m] @ end())

Note that the `@` modifier allows a query to look ahead of its evaluation time.

## Subquery

Subquery allows you to run an instant query for a given range and resolution. The result of a subquery is a range vector.
@@ -82,7 +82,7 @@ Prometheus has several flags that configure local storage. The most important ar

* `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
* `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`. Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
* `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Only the persistent blocks are deleted to honor this retention although WAL and m-mapped chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours).
* `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain. The oldest data will be removed first. Defaults to `0` or disabled. Units supported: B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only the persistent blocks are deleted to honor this retention although WAL and m-mapped chunks are counted in the total size. So the minimum requirement for the disk is the peak space taken by the `wal` (the WAL and Checkpoint) and `chunks_head` (m-mapped Head chunks) directory combined (peaks every 2 hours).
* `--storage.tsdb.retention`: Deprecated in favor of `storage.tsdb.retention.time`.
* `--storage.tsdb.wal-compression`: Enables compression of the write-ahead log (WAL). Depending on your data, you can expect the WAL size to be halved with little extra cpu load. This flag was introduced in 2.11.0 and enabled by default in 2.20.0. Note that once enabled, downgrading Prometheus to a version below 2.11.0 will require deleting the WAL.

@@ -129,7 +129,7 @@ The read and write protocols both use a snappy-compressed protocol buffer encodi

For details on configuring remote storage integrations in Prometheus, see the [remote write](configuration/configuration.md#remote_write) and [remote read](configuration/configuration.md#remote_read) sections of the Prometheus configuration documentation.

The built-in remote write receiver can be enabled by setting the `--enable-feature=remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`.
The built-in remote write receiver can be enabled by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`.

For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).
go.mod
@@ -1,15 +1,15 @@
module github.com/prometheus/prometheus

go 1.14
go 1.16

require (
github.com/Azure/azure-sdk-for-go v60.1.0+incompatible
github.com/Azure/azure-sdk-for-go v61.1.0+incompatible
github.com/Azure/go-autorest/autorest v0.11.23
github.com/Azure/go-autorest/autorest/adal v0.9.17
github.com/Azure/go-autorest/autorest/adal v0.9.18
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a
github.com/aws/aws-sdk-go v1.42.24
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
github.com/aws/aws-sdk-go v1.42.31
github.com/cespare/xxhash/v2 v2.1.2
github.com/containerd/containerd v1.5.7 // indirect
github.com/dennwc/varint v1.0.0

@@ -27,7 +27,7 @@ require (
github.com/go-zookeeper/zk v1.0.2
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/pprof v0.0.0-20211122183932-1daafda22083
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd
github.com/gophercloud/gophercloud v0.24.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.12.0

@@ -36,7 +36,7 @@ require (
github.com/json-iterator/go v1.1.12
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/linode/linodego v1.2.1
github.com/miekg/dns v1.1.44
github.com/miekg/dns v1.1.45
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f

@@ -55,27 +55,27 @@ require (
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
github.com/stretchr/testify v1.7.0
github.com/uber/jaeger-client-go v2.29.1+incompatible
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/uber/jaeger-lib v2.4.1+incompatible
go.uber.org/atomic v1.9.0
go.uber.org/goleak v1.1.12
golang.org/x/net v0.0.0-20211209124913-491a49abca63
golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
golang.org/x/tools v0.1.8
google.golang.org/api v0.63.0
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be
google.golang.org/api v0.64.0
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb
google.golang.org/protobuf v1.27.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
k8s.io/api v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/client-go v0.23.0
k8s.io/api v0.22.4
k8s.io/apimachinery v0.22.4
k8s.io/client-go v0.22.4
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.30.0
k8s.io/klog/v2 v2.40.1
)

replace (
go.sum
@ -52,8 +52,8 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
|
|||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v60.1.0+incompatible h1:j6y8ddurcaiyLfwBwPmJFaunp6BDzyQTuAgMrm1r++o=
|
||||
github.com/Azure/azure-sdk-for-go v60.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v61.1.0+incompatible h1:Qbz3jdfkXIPjZECEuk2E7i3iLhC9Ul74pG5mQRQC+z4=
|
||||
github.com/Azure/azure-sdk-for-go v61.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
|
@ -74,8 +74,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW
|
|||
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.17 h1:esOPl2dhcz9P3jqBSJ8tPGEj2EqzPPT6zfyuloiogKY=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A=
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
|
@ -154,8 +154,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc=
|
||||
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
|
||||
|
@ -188,8 +188,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
|
|||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.42.24 h1:pDUeL5+HaEK+CBKsnzmjZCpLmfRek9JLMM/KhjiQorU=
|
||||
github.com/aws/aws-sdk-go v1.42.24/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs=
|
||||
github.com/aws/aws-sdk-go v1.42.31 h1:tSv/YzjrFlbSqWmov9quBxrSNXLPUjJI7nPEB57S1+M=
|
||||
github.com/aws/aws-sdk-go v1.42.31/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
|
||||
github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
|
||||
|
@ -349,7 +349,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
|
|||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
|
@ -424,8 +423,8 @@ github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1
|
|||
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
|
||||
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
|
@ -443,7 +442,6 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5
|
|||
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
|
||||
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
|
||||
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
|
@ -721,8 +719,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20211122183932-1daafda22083 h1:c8EUapQFi+kjzedr4c6WqbwMdmB95+oDBWZ5XFHFYxY=
|
||||
github.com/google/pprof v0.0.0-20211122183932-1daafda22083/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -975,8 +973,8 @@ github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
|
|||
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.44 h1:4rpqcegYPVkvIeVhITrKP1sRR3KjfRc1nrOPMUZmLyc=
|
||||
github.com/miekg/dns v1.1.44/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/miekg/dns v1.1.45 h1:g5fRIhm9nx7g8osrAvgb16QJfmyMsyOCb+J7LSv+Qzk=
|
||||
github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE=
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||
|
@ -1293,8 +1291,8 @@ github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyu
|
|||
github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
|
||||
github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4=
|
||||
github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
|
||||
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
|
@ -1419,9 +1417,9 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
|
||||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
|
@ -1437,7 +1435,6 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
|
@ -1535,11 +1532,11 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98 h1:+6WJMRLHlD7X7frgp7TUZ36RnQzSf9wVVTNakEp+nqY=
|
||||
golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1678,18 +1675,16 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1771,7 +1766,6 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs
|
|||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
|
@ -1791,8 +1785,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w=
|
||||
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be h1:JRBiPXZpZ1FsceyPRRme0vX394zXC3xlhqu705k9nzM=
|
||||
golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1801,10 +1795,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
|
|||
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
|
||||
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
|
||||
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
|
||||
gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
||||
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
|
@ -1837,8 +1829,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
|
|||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.63.0 h1:n2bqqK895ygnBpdPDYetfy23K7fJ22wsrZKCyfuRkkA=
|
||||
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
|
||||
google.golang.org/api v0.64.0 h1:l3pi8ncrQgB9+ncFw3A716L8lWujnXniBYbxWqqy6tE=
|
||||
google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -1914,8 +1906,8 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
|
|||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb h1:ZrsicilzPCS/Xr8qtBZZLpy4P9TYXAfl49ctG1/5tgw=
|
||||
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
@ -2026,14 +2018,14 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
|
|||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||
k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro=
|
||||
k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
|
||||
k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw=
|
||||
k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
|
||||
k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
|
||||
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||
k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ=
|
||||
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
|
||||
k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck=
|
||||
k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
|
||||
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
|
||||
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
|
||||
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
|
||||
|
@ -2041,8 +2033,8 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
|
|||
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
|
||||
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
|
||||
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
|
||||
k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY=
|
||||
k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
|
||||
k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg=
|
||||
k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
|
||||
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
|
||||
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
|
||||
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
|
||||
|
@ -2052,26 +2044,22 @@ k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
|||
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
|
||||
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
|
||||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
@ -222,10 +222,17 @@ type EngineOpts struct {
|
|||
// a subquery in milliseconds if no step in range vector was specified `[30m:<step>]`.
|
||||
NoStepSubqueryIntervalFn func(rangeMillis int64) int64
|
||||
|
||||
// EnableAtModifier if true enables @ modifier. Disabled otherwise.
|
||||
// EnableAtModifier if true enables @ modifier. Disabled otherwise. This
|
||||
// is supposed to be enabled for regular PromQL (as of Prometheus v2.33)
|
||||
// but the option to disable it is still provided here for those using
|
||||
// the Engine outside of Prometheus.
|
||||
EnableAtModifier bool
|
||||
|
||||
// EnableNegativeOffset if true enables negative (-) offset values. Disabled otherwise.
|
||||
// EnableNegativeOffset if true enables negative (-) offset
|
||||
// values. Disabled otherwise. This is supposed to be enabled for
|
||||
// regular PromQL (as of Prometheus v2.33) but the option to disable it
|
||||
// is still provided here for those using the Engine outside of
|
||||
// Prometheus.
|
||||
EnableNegativeOffset bool
|
||||
}
|
||||
|
||||
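For context on the two feature flags above: both are expected to be true inside Prometheus itself as of v2.33, and only embedders of the engine would turn them off. A minimal, hypothetical construction sketch (the MaxSamples and Timeout values are placeholders, not values taken from this change):

```go
package main

import (
	"time"

	"github.com/prometheus/prometheus/promql"
)

func main() {
	opts := promql.EngineOpts{
		MaxSamples:               50000000,        // placeholder limit
		Timeout:                  2 * time.Minute, // placeholder timeout
		NoStepSubqueryIntervalFn: func(int64) int64 { return int64(time.Minute / time.Millisecond) },
		EnableAtModifier:         true, // stable as of Prometheus v2.33
		EnableNegativeOffset:     true, // stable as of Prometheus v2.33
	}
	_ = promql.NewEngine(opts)
}
```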
@ -613,6 +613,7 @@ func (t *Test) clear() {
|
|||
Timeout: 100 * time.Second,
|
||||
NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
|
||||
EnableAtModifier: true,
|
||||
EnableNegativeOffset: true,
|
||||
}
|
||||
|
||||
t.queryEngine = NewEngine(opts)
|
||||
|
@ -680,7 +681,9 @@ type LazyLoader struct {
|
|||
|
||||
// LazyLoaderOpts are options for the lazy loader.
|
||||
type LazyLoaderOpts struct {
|
||||
// Disabled PromQL engine features.
|
||||
// Both of these must be set to true for regular PromQL (as of
|
||||
// Prometheus v2.33). They can still be disabled here for legacy and
|
||||
// other uses.
|
||||
EnableAtModifier, EnableNegativeOffset bool
|
||||
}
|
||||
|
||||
@ -18,6 +18,14 @@ eval instant at 10s metric offset 50s @ 100
|
|||
metric{job="1"} 5
|
||||
metric{job="2"} 10
|
||||
|
||||
eval instant at 10s metric @ 0 offset -50s
|
||||
metric{job="1"} 5
|
||||
metric{job="2"} 10
|
||||
|
||||
eval instant at 10s metric offset -50s @ 0
|
||||
metric{job="1"} 5
|
||||
metric{job="2"} 10
|
||||
|
||||
eval instant at 10s -metric @ 100
|
||||
{job="1"} -10
|
||||
{job="2"} -20
|
||||
@ -29,6 +29,12 @@ eval instant at 18000s rate(http_requests{instance!="3"}[1m] offset 10000s)
|
|||
{job="api-server", instance="0", group="canary"} 3
|
||||
{job="api-server", instance="1", group="canary"} 4
|
||||
|
||||
eval instant at 4000s rate(http_requests{instance!="3"}[1m] offset -4000s)
|
||||
{job="api-server", instance="0", group="production"} 1
|
||||
{job="api-server", instance="1", group="production"} 2
|
||||
{job="api-server", instance="0", group="canary"} 3
|
||||
{job="api-server", instance="1", group="canary"} 4
|
||||
|
||||
eval instant at 18000s rate(http_requests[40s]) - rate(http_requests[1m] offset 10000s)
|
||||
{job="api-server", instance="0", group="production"} 2
|
||||
{job="api-server", instance="1", group="production"} 1
|
||||
@ -15,6 +15,7 @@ package remote
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
@ -830,14 +831,12 @@ func (t *QueueManager) calculateDesiredShards() int {
|
|||
return t.numShards
|
||||
}
|
||||
|
||||
// When behind we will try to catch up on a proportion of samples per tick.
|
||||
// This works similarly to an integral accumulator in that pending samples
|
||||
// is the result of the error integral.
|
||||
const integralGain = 0.1 / float64(shardUpdateDuration/time.Second)
|
||||
|
||||
var (
|
||||
// When behind we will try to catch up on 5% of samples per second.
|
||||
backlogCatchup = 0.05 * dataPending
|
||||
// Calculate Time to send one sample, averaged across all sends done this tick.
|
||||
timePerSample = dataOutDuration / dataOutRate
|
||||
desiredShards = timePerSample * (dataInRate*dataKeptRatio + integralGain*dataPending)
|
||||
desiredShards = timePerSample * (dataInRate*dataKeptRatio + backlogCatchup)
|
||||
)
|
||||
t.metrics.desiredNumShards.Set(desiredShards)
|
||||
level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards",
|
||||
|
@ -860,11 +859,13 @@ func (t *QueueManager) calculateDesiredShards() int {
|
|||
)
|
||||
level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
|
||||
"lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
|
||||
|
||||
desiredShards = math.Ceil(desiredShards) // Round up to be on the safe side.
|
||||
if lowerBound <= desiredShards && desiredShards <= upperBound {
|
||||
return t.numShards
|
||||
}
|
||||
|
||||
numShards := int(math.Ceil(desiredShards))
|
||||
numShards := int(desiredShards)
|
||||
// Do not downshard if we are more than ten seconds back.
|
||||
if numShards < t.numShards && delay > 10.0 {
|
||||
level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind")
|
||||
|
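To make the replacement formula concrete, here is a small sketch that reproduces the "backlogged 20s" case from the test table added further down: 10 samples/s in and out, 2 s of send time per second of wall time, and a 20 s backlog. The variable names mirror calculateDesiredShards, but the snippet is illustrative only.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	dataInRate := 10.0     // samples/s received
	dataKeptRatio := 1.0   // nothing dropped or relabelled away
	dataOutRate := 10.0    // samples/s sent
	dataOutDuration := 2.0 // seconds spent sending per second of wall time
	delay := 20.0          // seconds the sent timestamp lags behind the received timestamp

	dataPending := dataInRate * dataKeptRatio * delay // 200 samples behind
	timePerSample := dataOutDuration / dataOutRate    // 0.2 s per sample

	backlogCatchup := 0.05 * dataPending // catch up on 5% of the backlog per second
	desiredShards := timePerSample * (dataInRate*dataKeptRatio + backlogCatchup)

	fmt.Println(int(math.Ceil(desiredShards))) // 4, matching expectedShards in the test
}
```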
@ -1311,12 +1312,16 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
|
|||
}
|
||||
|
||||
err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
// When there is resharding, we cancel the context for this queue, which means the data is not sent.
|
||||
// So we exit early to not update the metrics.
|
||||
return err
|
||||
}
|
||||
|
||||
s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
|
||||
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
|
||||
return nil
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error {
|
||||
@ -356,6 +356,7 @@ func TestReshardRaceWithStop(t *testing.T) {
|
|||
|
||||
cfg := config.DefaultQueueConfig
|
||||
mcfg := config.DefaultMetadataConfig
|
||||
exitCh := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
metrics := newQueueManagerMetrics(nil, "", "")
|
||||
|
@ -364,6 +365,12 @@ func TestReshardRaceWithStop(t *testing.T) {
|
|||
h.Unlock()
|
||||
h.Lock()
|
||||
m.Stop()
|
||||
|
||||
select {
|
||||
case exitCh <- struct{}{}:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -372,6 +379,7 @@ func TestReshardRaceWithStop(t *testing.T) {
|
|||
m.reshardChan <- i
|
||||
h.Unlock()
|
||||
}
|
||||
<-exitCh
|
||||
}
|
||||
|
||||
func TestReleaseNoninternedString(t *testing.T) {
|
||||
|
@ -900,6 +908,189 @@ func TestCalculateDesiredShards(t *testing.T) {
|
|||
require.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
|
||||
}
|
||||
|
||||
func TestCalculateDesiredShardsDetail(t *testing.T) {
|
||||
c := NewTestWriteClient()
|
||||
cfg := config.DefaultQueueConfig
|
||||
mcfg := config.DefaultMetadataConfig
|
||||
|
||||
dir, err := ioutil.TempDir("", "TestCalculateDesiredShards")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, os.RemoveAll(dir))
|
||||
}()
|
||||
|
||||
metrics := newQueueManagerMetrics(nil, "", "")
|
||||
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
|
||||
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
prevShards int
|
||||
dataIn int64 // Quantities normalised to seconds.
|
||||
dataOut int64
|
||||
dataDropped int64
|
||||
dataOutDuration float64
|
||||
backlog float64
|
||||
expectedShards int
|
||||
}{
|
||||
{
|
||||
name: "nothing in or out 1",
|
||||
prevShards: 1,
|
||||
expectedShards: 1, // Shards stays the same.
|
||||
},
|
||||
{
|
||||
name: "nothing in or out 10",
|
||||
prevShards: 10,
|
||||
expectedShards: 10, // Shards stays the same.
|
||||
},
|
||||
{
|
||||
name: "steady throughput",
|
||||
prevShards: 1,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 1,
|
||||
expectedShards: 1,
|
||||
},
|
||||
{
|
||||
name: "scale down",
|
||||
prevShards: 10,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 5,
|
||||
expectedShards: 5,
|
||||
},
|
||||
{
|
||||
name: "scale down constrained",
|
||||
prevShards: 7,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 5,
|
||||
expectedShards: 7,
|
||||
},
|
||||
{
|
||||
name: "scale up",
|
||||
prevShards: 1,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 10,
|
||||
expectedShards: 10,
|
||||
},
|
||||
{
|
||||
name: "scale up constrained",
|
||||
prevShards: 8,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 10,
|
||||
expectedShards: 8,
|
||||
},
|
||||
{
|
||||
name: "backlogged 20s",
|
||||
prevShards: 2,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 2,
|
||||
backlog: 20,
|
||||
expectedShards: 4,
|
||||
},
|
||||
{
|
||||
name: "backlogged 90s",
|
||||
prevShards: 4,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 4,
|
||||
backlog: 90,
|
||||
expectedShards: 22,
|
||||
},
|
||||
{
|
||||
name: "backlog reduced",
|
||||
prevShards: 22,
|
||||
dataIn: 10,
|
||||
dataOut: 20,
|
||||
dataOutDuration: 4,
|
||||
backlog: 10,
|
||||
expectedShards: 3,
|
||||
},
|
||||
{
|
||||
name: "backlog eliminated",
|
||||
prevShards: 3,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 2,
|
||||
backlog: 0,
|
||||
expectedShards: 2, // Shard back down.
|
||||
},
|
||||
{
|
||||
name: "slight slowdown",
|
||||
prevShards: 1,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 1.2,
|
||||
expectedShards: 2, // 1.2 is rounded up to 2.
|
||||
},
|
||||
{
|
||||
name: "bigger slowdown",
|
||||
prevShards: 1,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 1.4,
|
||||
expectedShards: 2,
|
||||
},
|
||||
{
|
||||
name: "speed up",
|
||||
prevShards: 2,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 1.2,
|
||||
backlog: 0,
|
||||
expectedShards: 2, // No reaction - 1.2 is rounded up to 2.
|
||||
},
|
||||
{
|
||||
name: "speed up more",
|
||||
prevShards: 2,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 0.9,
|
||||
backlog: 0,
|
||||
expectedShards: 1,
|
||||
},
|
||||
{
|
||||
name: "marginal decision A",
|
||||
prevShards: 3,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 2.01,
|
||||
backlog: 0,
|
||||
expectedShards: 3, // 2.01 rounds up to 3.
|
||||
},
|
||||
{
|
||||
name: "marginal decision B",
|
||||
prevShards: 3,
|
||||
dataIn: 10,
|
||||
dataOut: 10,
|
||||
dataOutDuration: 1.99,
|
||||
backlog: 0,
|
||||
expectedShards: 2, // 1.99 rounds up to 2.
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
m.numShards = tc.prevShards
|
||||
forceEMWA(samplesIn, tc.dataIn*int64(shardUpdateDuration/time.Second))
|
||||
samplesIn.tick()
|
||||
forceEMWA(m.dataOut, tc.dataOut*int64(shardUpdateDuration/time.Second))
|
||||
forceEMWA(m.dataDropped, tc.dataDropped*int64(shardUpdateDuration/time.Second))
|
||||
forceEMWA(m.dataOutDuration, int64(tc.dataOutDuration*float64(shardUpdateDuration)))
|
||||
m.highestRecvTimestamp.value = tc.backlog // Not Set() because it can only increase value.
|
||||
|
||||
require.Equal(t, tc.expectedShards, m.calculateDesiredShards())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func forceEMWA(r *ewmaRate, rate int64) {
|
||||
r.init = false
|
||||
r.newEvents.Store(rate)
|
||||
}
|
||||
|
||||
func TestQueueManagerMetrics(t *testing.T) {
|
||||
reg := prometheus.NewPedanticRegistry()
|
||||
metrics := newQueueManagerMetrics(reg, "name", "http://localhost:1234")
|
||||
@ -0,0 +1,184 @@
|
|||
// Copyright 2021 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package chunks
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
)
|
||||
|
||||
type chunkWriteJob struct {
|
||||
cutFile bool
|
||||
seriesRef HeadSeriesRef
|
||||
mint int64
|
||||
maxt int64
|
||||
chk chunkenc.Chunk
|
||||
ref ChunkDiskMapperRef
|
||||
callback func(error)
|
||||
}
|
||||
|
||||
// chunkWriteQueue is a queue for writing chunks to disk in a non-blocking fashion.
|
||||
// Chunks that shall be written get added to the queue, which is consumed asynchronously.
|
||||
// Adding jobs to the queue is non-blocking as long as the queue isn't full.
|
||||
type chunkWriteQueue struct {
|
||||
jobs chan chunkWriteJob
|
||||
|
||||
chunkRefMapMtx sync.RWMutex
|
||||
chunkRefMap map[ChunkDiskMapperRef]chunkenc.Chunk
|
||||
|
||||
isRunningMtx sync.Mutex // Protects the isRunning property.
|
||||
isRunning bool // Used to prevent new jobs from being added to the queue when the chan is already closed.
|
||||
|
||||
workerWg sync.WaitGroup
|
||||
|
||||
writeChunk writeChunkF
|
||||
|
||||
// Keeping three separate counters instead of only a single CounterVec to improve the performance of the critical
|
||||
// addJob() method which otherwise would need to perform a WithLabelValues call on the CounterVec.
|
||||
adds prometheus.Counter
|
||||
gets prometheus.Counter
|
||||
completed prometheus.Counter
|
||||
}
|
||||
|
||||
// writeChunkF is a function which writes chunks; it is dynamic to allow mocking in tests.
|
||||
type writeChunkF func(HeadSeriesRef, int64, int64, chunkenc.Chunk, ChunkDiskMapperRef, bool) error
|
||||
|
||||
func newChunkWriteQueue(reg prometheus.Registerer, size int, writeChunk writeChunkF) *chunkWriteQueue {
|
||||
counters := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_chunk_write_queue_operations_total",
|
||||
Help: "Number of operations on the chunk_write_queue.",
|
||||
},
|
||||
[]string{"operation"},
|
||||
)
|
||||
|
||||
q := &chunkWriteQueue{
|
||||
jobs: make(chan chunkWriteJob, size),
|
||||
chunkRefMap: make(map[ChunkDiskMapperRef]chunkenc.Chunk, size),
|
||||
writeChunk: writeChunk,
|
||||
|
||||
adds: counters.WithLabelValues("add"),
|
||||
gets: counters.WithLabelValues("get"),
|
||||
completed: counters.WithLabelValues("complete"),
|
||||
}
|
||||
|
||||
if reg != nil {
|
||||
reg.MustRegister(counters)
|
||||
}
|
||||
|
||||
q.start()
|
||||
return q
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) start() {
|
||||
c.workerWg.Add(1)
|
||||
go func() {
|
||||
defer c.workerWg.Done()
|
||||
|
||||
for job := range c.jobs {
|
||||
c.processJob(job)
|
||||
}
|
||||
}()
|
||||
|
||||
c.isRunningMtx.Lock()
|
||||
c.isRunning = true
|
||||
c.isRunningMtx.Unlock()
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) processJob(job chunkWriteJob) {
|
||||
err := c.writeChunk(job.seriesRef, job.mint, job.maxt, job.chk, job.ref, job.cutFile)
|
||||
if job.callback != nil {
|
||||
job.callback(err)
|
||||
}
|
||||
|
||||
c.chunkRefMapMtx.Lock()
|
||||
defer c.chunkRefMapMtx.Unlock()
|
||||
|
||||
delete(c.chunkRefMap, job.ref)
|
||||
|
||||
c.completed.Inc()
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) addJob(job chunkWriteJob) (err error) {
|
||||
defer func() {
|
||||
if err == nil {
|
||||
c.adds.Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
c.isRunningMtx.Lock()
|
||||
defer c.isRunningMtx.Unlock()
|
||||
|
||||
if !c.isRunning {
|
||||
return errors.New("queue is not started")
|
||||
}
|
||||
|
||||
c.chunkRefMapMtx.Lock()
|
||||
c.chunkRefMap[job.ref] = job.chk
|
||||
c.chunkRefMapMtx.Unlock()
|
||||
|
||||
c.jobs <- job
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) get(ref ChunkDiskMapperRef) chunkenc.Chunk {
|
||||
c.chunkRefMapMtx.RLock()
|
||||
defer c.chunkRefMapMtx.RUnlock()
|
||||
|
||||
chk, ok := c.chunkRefMap[ref]
|
||||
if ok {
|
||||
c.gets.Inc()
|
||||
}
|
||||
|
||||
return chk
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) stop() {
|
||||
c.isRunningMtx.Lock()
|
||||
defer c.isRunningMtx.Unlock()
|
||||
|
||||
if !c.isRunning {
|
||||
return
|
||||
}
|
||||
|
||||
c.isRunning = false
|
||||
|
||||
close(c.jobs)
|
||||
|
||||
c.workerWg.Wait()
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) queueIsEmpty() bool {
|
||||
return c.queueSize() == 0
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) queueIsFull() bool {
|
||||
// When the queue is full and blocked on the writer the chunkRefMap has one more job than the cap of the jobCh
|
||||
// because one job is currently being processed and blocked in the writer.
|
||||
return c.queueSize() == cap(c.jobs)+1
|
||||
}
|
||||
|
||||
func (c *chunkWriteQueue) queueSize() int {
|
||||
c.chunkRefMapMtx.Lock()
|
||||
defer c.chunkRefMapMtx.Unlock()
|
||||
|
||||
// Looking at chunkRefMap instead of jobCh because the job is popped from the chan before it has
|
||||
// been fully processed; it remains in the chunkRefMap until the processing is complete.
|
||||
return len(c.chunkRefMap)
|
||||
}
|
|
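The tests in the next file exercise the queue in detail; as a quick orientation, this is the intended calling pattern, sketched as a hypothetical helper inside the chunks package (newChunkWriteQueue, chunkWriteJob, writeChunkF and newChunkDiskMapperRef are the identifiers defined above; everything else is made up for the example):

```go
func exampleChunkWriteQueueUsage(write writeChunkF) error {
	q := newChunkWriteQueue(nil, 1000, write)
	defer q.stop() // closes the jobs channel and waits for the worker to drain it

	done := make(chan error, 1)
	chk := chunkenc.NewXORChunk()
	ref := newChunkDiskMapperRef(0, SegmentHeaderSize)

	if err := q.addJob(chunkWriteJob{
		seriesRef: HeadSeriesRef(1),
		mint:      100,
		maxt:      200,
		chk:       chk,
		ref:       ref,
		callback:  func(err error) { done <- err },
	}); err != nil {
		return err
	}

	// While the job is queued or being written, readers still find the chunk here.
	_ = q.get(ref)

	return <-done
}
```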
@ -0,0 +1,268 @@
|
|||
// Copyright 2021 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package chunks
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
)
|
||||
|
||||
func TestChunkWriteQueue_GettingChunkFromQueue(t *testing.T) {
|
||||
var blockWriterWg sync.WaitGroup
|
||||
blockWriterWg.Add(1)
|
||||
|
||||
// blockingChunkWriter blocks until blockWriterWg is done.
|
||||
blockingChunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _ bool) error {
|
||||
blockWriterWg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
q := newChunkWriteQueue(nil, 1000, blockingChunkWriter)
|
||||
|
||||
defer q.stop()
|
||||
defer blockWriterWg.Done()
|
||||
|
||||
testChunk := chunkenc.NewXORChunk()
|
||||
var ref ChunkDiskMapperRef
|
||||
job := chunkWriteJob{
|
||||
chk: testChunk,
|
||||
ref: ref,
|
||||
}
|
||||
require.NoError(t, q.addJob(job))
|
||||
|
||||
// Retrieve chunk from the queue.
|
||||
gotChunk := q.get(ref)
|
||||
require.Equal(t, testChunk, gotChunk)
|
||||
}
|
||||
|
||||
func TestChunkWriteQueue_WritingThroughQueue(t *testing.T) {
|
||||
var (
|
||||
gotSeriesRef HeadSeriesRef
|
||||
gotMint, gotMaxt int64
|
||||
gotChunk chunkenc.Chunk
|
||||
gotRef ChunkDiskMapperRef
|
||||
gotCutFile bool
|
||||
)
|
||||
|
||||
blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) error {
|
||||
gotSeriesRef = seriesRef
|
||||
gotMint = mint
|
||||
gotMaxt = maxt
|
||||
gotChunk = chunk
|
||||
gotRef = ref
|
||||
gotCutFile = cutFile
|
||||
return nil
|
||||
}
|
||||
|
||||
q := newChunkWriteQueue(nil, 1000, blockingChunkWriter)
|
||||
defer q.stop()
|
||||
|
||||
seriesRef := HeadSeriesRef(1)
|
||||
var mint, maxt int64 = 2, 3
|
||||
chunk := chunkenc.NewXORChunk()
|
||||
ref := newChunkDiskMapperRef(321, 123)
|
||||
cutFile := true
|
||||
awaitCb := make(chan struct{})
|
||||
require.NoError(t, q.addJob(chunkWriteJob{seriesRef: seriesRef, mint: mint, maxt: maxt, chk: chunk, ref: ref, cutFile: cutFile, callback: func(err error) {
|
||||
close(awaitCb)
|
||||
}}))
|
||||
<-awaitCb
|
||||
|
||||
// Compare whether the write function has received all job attributes correctly.
|
||||
require.Equal(t, seriesRef, gotSeriesRef)
|
||||
require.Equal(t, mint, gotMint)
|
||||
require.Equal(t, maxt, gotMaxt)
|
||||
require.Equal(t, chunk, gotChunk)
|
||||
require.Equal(t, ref, gotRef)
|
||||
require.Equal(t, cutFile, gotCutFile)
|
||||
}
|
||||
|
||||
func TestChunkWriteQueue_WrappingAroundSizeLimit(t *testing.T) {
|
||||
sizeLimit := 100
|
||||
unblockChunkWriterCh := make(chan struct{}, sizeLimit)
|
||||
|
||||
// blockingChunkWriter blocks until the unblockChunkWriterCh channel returns a value.
|
||||
blockingChunkWriter := func(seriesRef HeadSeriesRef, mint, maxt int64, chunk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) error {
|
||||
<-unblockChunkWriterCh
|
||||
return nil
|
||||
}
|
||||
|
||||
q := newChunkWriteQueue(nil, sizeLimit, blockingChunkWriter)
|
||||
defer q.stop()
|
||||
// Unblock writers when shutting down.
|
||||
defer close(unblockChunkWriterCh)
|
||||
|
||||
var chunkRef ChunkDiskMapperRef
|
||||
var callbackWg sync.WaitGroup
|
||||
addChunk := func() {
|
||||
callbackWg.Add(1)
|
||||
require.NoError(t, q.addJob(chunkWriteJob{
|
||||
ref: chunkRef,
|
||||
callback: func(err error) {
|
||||
callbackWg.Done()
|
||||
},
|
||||
}))
|
||||
chunkRef++
|
||||
}
|
||||
|
||||
unblockChunkWriter := func() {
|
||||
unblockChunkWriterCh <- struct{}{}
|
||||
}
|
||||
|
||||
// Fill the queue to the middle of the size limit.
|
||||
for job := 0; job < sizeLimit/2; job++ {
|
||||
addChunk()
|
||||
}
|
||||
|
||||
// Consume the jobs.
|
||||
for job := 0; job < sizeLimit/2; job++ {
|
||||
unblockChunkWriter()
|
||||
}
|
||||
|
||||
// Add jobs until the queue is full.
|
||||
// Note that one more job than <sizeLimit> can be added because one will be processed by the worker already
|
||||
// and it will block on the chunk write function.
|
||||
for job := 0; job < sizeLimit+1; job++ {
|
||||
addChunk()
|
||||
}
|
||||
|
||||
// The queue should be full.
|
||||
require.True(t, q.queueIsFull())
|
||||
|
||||
// Adding another job should block as long as no job from the queue gets consumed.
|
||||
addedJob := atomic.NewBool(false)
|
||||
go func() {
|
||||
addChunk()
|
||||
addedJob.Store(true)
|
||||
}()
|
||||
|
||||
// Wait for 10ms while the adding of a new job is blocked.
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
require.False(t, addedJob.Load())
|
||||
|
||||
// Consume one job from the queue.
|
||||
unblockChunkWriter()
|
||||
|
||||
// Wait until the job has been added to the queue.
|
||||
require.Eventually(t, func() bool { return addedJob.Load() }, time.Second, time.Millisecond*10)
|
||||
|
||||
// The queue should be full again.
|
||||
require.True(t, q.queueIsFull())
|
||||
|
||||
// Consume <sizeLimit>+1 jobs from the queue.
|
||||
// To drain the queue we need to consume <sizeLimit>+1 jobs because 1 job
|
||||
// is already in the state of being processed.
|
||||
for job := 0; job < sizeLimit+1; job++ {
|
||||
require.False(t, q.queueIsEmpty())
|
||||
unblockChunkWriter()
|
||||
}
|
||||
|
||||
// Wait until all jobs have been processed.
|
||||
callbackWg.Wait()
|
||||
require.True(t, q.queueIsEmpty())
|
||||
}
|
||||
|
||||
func TestChunkWriteQueue_HandlerErrorViaCallback(t *testing.T) {
|
||||
testError := errors.New("test error")
|
||||
chunkWriter := func(_ HeadSeriesRef, _, _ int64, _ chunkenc.Chunk, _ ChunkDiskMapperRef, _ bool) error {
|
||||
return testError
|
||||
}
|
||||
|
||||
awaitCb := make(chan struct{})
|
||||
var gotError error
|
||||
callback := func(err error) {
|
||||
gotError = err
|
||||
close(awaitCb)
|
||||
}
|
||||
|
||||
q := newChunkWriteQueue(nil, 1, chunkWriter)
|
||||
defer q.stop()
|
||||
|
||||
job := chunkWriteJob{callback: callback}
|
||||
require.NoError(t, q.addJob(job))
|
||||
|
||||
<-awaitCb
|
||||
|
||||
require.Equal(t, testError, gotError)
|
||||
}
|
||||
|
||||
func BenchmarkChunkWriteQueue_addJob(b *testing.B) {
|
||||
for _, withReads := range []bool{false, true} {
|
||||
b.Run(fmt.Sprintf("with reads %t", withReads), func(b *testing.B) {
|
||||
for _, concurrentWrites := range []int{1, 10, 100, 1000} {
|
||||
b.Run(fmt.Sprintf("%d concurrent writes", concurrentWrites), func(b *testing.B) {
|
||||
issueReadSignal := make(chan struct{})
|
||||
q := newChunkWriteQueue(nil, 1000, func(ref HeadSeriesRef, i, i2 int64, chunk chunkenc.Chunk, ref2 ChunkDiskMapperRef, b bool) error {
|
||||
if withReads {
|
||||
select {
|
||||
case issueReadSignal <- struct{}{}:
|
||||
default:
|
||||
// Can't write to issueReadSignal, don't block but omit read instead.
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
b.Cleanup(func() {
|
||||
// Stopped already, so no more writes will happen.
|
||||
close(issueReadSignal)
|
||||
})
|
||||
b.Cleanup(q.stop)
|
||||
|
||||
start := sync.WaitGroup{}
|
||||
start.Add(1)
|
||||
|
||||
jobs := make(chan chunkWriteJob, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
jobs <- chunkWriteJob{
|
||||
seriesRef: HeadSeriesRef(i),
|
||||
ref: ChunkDiskMapperRef(i),
|
||||
}
|
||||
}
|
||||
close(jobs)
|
||||
|
||||
go func() {
|
||||
for range issueReadSignal {
|
||||
// We don't care about the ID we're getting, we just want to grab the lock.
|
||||
_ = q.get(ChunkDiskMapperRef(0))
|
||||
}
|
||||
}()
|
||||
|
||||
done := sync.WaitGroup{}
|
||||
done.Add(concurrentWrites)
|
||||
for w := 0; w < concurrentWrites; w++ {
|
||||
go func() {
|
||||
start.Wait()
|
||||
for j := range jobs {
|
||||
_ = q.addJob(j)
|
||||
}
|
||||
done.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
start.Done()
|
||||
done.Wait()
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -26,7 +26,9 @@ import (
|
|||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/dennwc/varint"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
|
@ -66,6 +68,8 @@ const (
|
|||
MaxWriteBufferSize = 8 * 1024 * 1024 // 8 MiB.
|
||||
// DefaultWriteBufferSize is the default write buffer size.
|
||||
DefaultWriteBufferSize = 4 * 1024 * 1024 // 4 MiB.
|
||||
// DefaultWriteQueueSize is the default size of the in-memory queue used before flushing chunks to the disk.
|
||||
DefaultWriteQueueSize = 1000
|
||||
)
|
||||
|
||||
// ChunkDiskMapperRef represents the location of a head chunk on disk.
|
||||
|
@ -77,10 +81,10 @@ func newChunkDiskMapperRef(seq, offset uint64) ChunkDiskMapperRef {
|
|||
return ChunkDiskMapperRef((seq << 32) | offset)
|
||||
}
|
||||
|
||||
func (ref ChunkDiskMapperRef) Unpack() (sgmIndex, chkStart int) {
|
||||
sgmIndex = int(ref >> 32)
|
||||
chkStart = int((ref << 32) >> 32)
|
||||
return sgmIndex, chkStart
|
||||
func (ref ChunkDiskMapperRef) Unpack() (seq, offset int) {
|
||||
seq = int(ref >> 32)
|
||||
offset = int((ref << 32) >> 32)
|
||||
return seq, offset
|
||||
}
|
||||
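The renamed return values make the packing explicit: the upper 32 bits carry the file sequence and the lower 32 bits the offset within that file. A tiny round-trip fragment (same values as the write-queue test above):

```go
ref := newChunkDiskMapperRef(321, 123)
seq, offset := ref.Unpack()
fmt.Println(seq, offset) // 321 123
```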
|
||||
// CorruptionErr is an error that's returned when corruption is encountered.
|
||||
|
@ -94,19 +98,98 @@ func (e *CorruptionErr) Error() string {
|
|||
return errors.Wrapf(e.Err, "corruption in head chunk file %s", segmentFile(e.Dir, e.FileIndex)).Error()
|
||||
}
|
||||
|
||||
// chunkPos keeps track of the position in the head chunk files.
|
||||
// chunkPos is not thread-safe, a lock must be used to protect it.
|
||||
type chunkPos struct {
|
||||
seq uint64 // Index of chunk file.
|
||||
offset uint64 // Offset within chunk file.
|
||||
cutFile bool // When true then the next chunk will be written to a new file.
|
||||
}
|
||||
|
||||
// getNextChunkRef takes a chunk and returns the chunk reference which will refer to it once it has been written.
|
||||
// getNextChunkRef also decides whether a new file should be cut before writing this chunk, and it returns the decision via the second return value.
|
||||
// The order of calling getNextChunkRef must be the order in which chunks are written to the disk.
|
||||
func (f *chunkPos) getNextChunkRef(chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, cutFile bool) {
|
||||
chkLen := uint64(len(chk.Bytes()))
|
||||
bytesToWrite := f.bytesToWriteForChunk(chkLen)
|
||||
|
||||
if f.shouldCutNewFile(chkLen) {
|
||||
f.toNewFile()
|
||||
f.cutFile = false
|
||||
cutFile = true
|
||||
}
|
||||
|
||||
chkOffset := f.offset
|
||||
f.offset += bytesToWrite
|
||||
|
||||
return newChunkDiskMapperRef(f.seq, chkOffset), cutFile
|
||||
}
|
||||
|
||||
// toNewFile updates the seq/offset position to point to the beginning of a new chunk file.
|
||||
func (f *chunkPos) toNewFile() {
|
||||
f.seq++
|
||||
f.offset = SegmentHeaderSize
|
||||
}
|
||||
|
||||
// cutFileOnNextChunk ensures that the next chunk will be written to a new file.
|
||||
// Not thread safe, a lock must be held when calling this.
|
||||
func (f *chunkPos) cutFileOnNextChunk() {
|
||||
f.cutFile = true
|
||||
}
|
||||
|
||||
// initSeq sets the sequence number of the head chunk file.
|
||||
// Should only be used for initialization, after that the sequence number will be managed by chunkPos.
|
||||
func (f *chunkPos) initSeq(seq uint64) {
|
||||
f.seq = seq
|
||||
}
|
||||
|
||||
// shouldCutNewFile returns whether a new file should be cut based on the file size.
|
||||
// The read or write lock on chunkPos must be held when calling this.
|
||||
func (f *chunkPos) shouldCutNewFile(chunkSize uint64) bool {
|
||||
if f.cutFile {
|
||||
return true
|
||||
}
|
||||
|
||||
return f.offset == 0 || // First head chunk file.
|
||||
f.offset+chunkSize+MaxHeadChunkMetaSize > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
|
||||
}
|
||||
|
||||
// bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size,
|
||||
// including all metadata before and after the chunk data.
|
||||
// Head chunk format: https://github.com/prometheus/prometheus/blob/main/tsdb/docs/format/head_chunks.md#chunk
|
||||
func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 {
|
||||
// Headers.
|
||||
bytes := uint64(SeriesRefSize) + 2*MintMaxtSize + ChunkEncodingSize
|
||||
|
||||
// Size of chunk length encoded as uvarint.
|
||||
bytes += uint64(varint.UvarintSize(chkLen))
|
||||
|
||||
// Chunk length.
|
||||
bytes += chkLen
|
||||
|
||||
// crc32.
|
||||
bytes += CRCSize
|
||||
|
||||
return bytes
|
||||
}
|
||||
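As a sanity check on the accounting, the arithmetic for a hypothetical 150-byte chunk works out as follows, using the per-chunk header layout from the head chunks format (8-byte series ref, two 8-byte timestamps, 1-byte encoding, 4-byte CRC32; the uvarint for a length of 150 takes 2 bytes). Sketched as a fragment inside the chunks package:

```go
chkLen := uint64(150)
header := uint64(SeriesRefSize) + 2*MintMaxtSize + ChunkEncodingSize // 8 + 16 + 1 = 25
total := header + uint64(varint.UvarintSize(chkLen)) + chkLen + CRCSize
fmt.Println(total) // 25 + 2 + 150 + 4 = 181 bytes on disk for this chunk
```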
|
||||
// ChunkDiskMapper is for writing the Head block chunks to the disk
|
||||
// and access chunks via mmapped file.
|
||||
type ChunkDiskMapper struct {
|
||||
curFileNumBytes atomic.Int64 // Bytes written in current open file.
|
||||
|
||||
/// Writer.
|
||||
dir *os.File
|
||||
writeBufferSize int
|
||||
|
||||
curFile *os.File // File being written to.
|
||||
curFileSequence int // Index of current open file being appended to.
|
||||
curFileOffset atomic.Uint64 // Bytes written in current open file.
|
||||
curFileMaxt int64 // Used for the size retention.
|
||||
|
||||
// The values in evtlPos represent the file position which will eventually be
|
||||
// reached once the content of the write queue has been fully processed.
|
||||
evtlPosMtx sync.Mutex
|
||||
evtlPos chunkPos
|
||||
|
||||
byteBuf [MaxHeadChunkMetaSize]byte // Buffer used to write the header of the chunk.
|
||||
chkWriter *bufio.Writer // Writer for the current open file.
|
||||
crc32 hash.Hash
|
||||
|
@ -128,6 +211,8 @@ type ChunkDiskMapper struct {
|
|||
// This is done after iterating through all the chunks in those files using the IterateAllChunks method.
|
||||
fileMaxtSet bool
|
||||
|
||||
writeQueue *chunkWriteQueue
|
||||
|
||||
closed bool
|
||||
}
|
||||
|
||||
|
@ -141,7 +226,7 @@ type mmappedChunkFile struct {
|
|||
// using the default head chunk file duration.
|
||||
// NOTE: 'IterateAllChunks' method needs to be called at least once after creating ChunkDiskMapper
|
||||
// to set the maxt of all the files.
|
||||
func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*ChunkDiskMapper, error) {
|
||||
func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Pool, writeBufferSize, writeQueueSize int) (*ChunkDiskMapper, error) {
|
||||
// Validate write buffer size.
|
||||
if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize {
|
||||
return nil, errors.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize)
|
||||
|
@ -165,6 +250,7 @@ func NewChunkDiskMapper(dir string, pool chunkenc.Pool, writeBufferSize int) (*C
|
|||
crc32: newCRC32(),
|
||||
chunkBuffer: newChunkBuffer(),
|
||||
}
|
||||
m.writeQueue = newChunkWriteQueue(reg, writeQueueSize, m.writeChunk)
|
||||
|
||||
if m.pool == nil {
|
||||
m.pool = chunkenc.NewPool()
|
||||
|
@ -235,6 +321,8 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) {
|
|||
}
|
||||
}
|
||||
|
||||
cdm.evtlPos.initSeq(uint64(lastSeq))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -287,17 +375,44 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro
|
|||
|
||||
// WriteChunk writes the chunk to the disk.
|
||||
// The returned chunk ref is the reference from where the chunk encoding starts for the chunk.
|
||||
func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, err error) {
|
||||
func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, callback func(err error)) (chkRef ChunkDiskMapperRef) {
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil && callback != nil {
|
||||
callback(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// cdm.evtlPosMtx must be held to serialize the calls to .getNextChunkRef() and .addJob().
|
||||
cdm.evtlPosMtx.Lock()
|
||||
defer cdm.evtlPosMtx.Unlock()
|
||||
|
||||
ref, cutFile := cdm.evtlPos.getNextChunkRef(chk)
|
||||
err = cdm.writeQueue.addJob(chunkWriteJob{
|
||||
cutFile: cutFile,
|
||||
seriesRef: seriesRef,
|
||||
mint: mint,
|
||||
maxt: maxt,
|
||||
chk: chk,
|
||||
ref: ref,
|
||||
callback: callback,
|
||||
})
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
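Since WriteChunk now returns before the data is on disk, a caller that needs the outcome has to go through the callback. A hedged sketch of the caller side (cdm, seriesRef, mint, maxt and chk are assumed to be in scope; error handling is left schematic):

```go
var wg sync.WaitGroup
wg.Add(1)
var writeErr error
ref := cdm.WriteChunk(seriesRef, mint, maxt, chk, func(err error) {
	writeErr = err
	wg.Done()
})
// ref can be recorded in memory right away; the on-disk write completes asynchronously.
wg.Wait()
if writeErr != nil {
	// e.g. log the failure and fall back to keeping the chunk in memory
}
```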
func (cdm *ChunkDiskMapper) writeChunk(seriesRef HeadSeriesRef, mint, maxt int64, chk chunkenc.Chunk, ref ChunkDiskMapperRef, cutFile bool) (err error) {
|
||||
cdm.writePathMtx.Lock()
|
||||
defer cdm.writePathMtx.Unlock()
|
||||
|
||||
if cdm.closed {
|
||||
return 0, ErrChunkDiskMapperClosed
|
||||
return ErrChunkDiskMapperClosed
|
||||
}
|
||||
|
||||
if cdm.shouldCutNewFile(len(chk.Bytes())) {
|
||||
if err := cdm.cut(); err != nil {
|
||||
return 0, err
|
||||
if cutFile {
|
||||
err := cdm.cutAndExpectRef(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -305,15 +420,13 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64
|
|||
// so no need to flush here, as we have to flush at the end (to not keep partial chunks in buffer).
|
||||
if len(chk.Bytes())+MaxHeadChunkMetaSize < cdm.writeBufferSize && cdm.chkWriter.Available() < MaxHeadChunkMetaSize+len(chk.Bytes()) {
|
||||
if err := cdm.flushBuffer(); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cdm.crc32.Reset()
|
||||
bytesWritten := 0
|
||||
|
||||
chkRef = newChunkDiskMapperRef(uint64(cdm.curFileSequence), uint64(cdm.curFileSize()))
|
||||
|
||||
binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(seriesRef))
|
||||
bytesWritten += SeriesRefSize
|
||||
binary.BigEndian.PutUint64(cdm.byteBuf[bytesWritten:], uint64(mint))
|
||||
|
@ -326,59 +439,73 @@ func (cdm *ChunkDiskMapper) WriteChunk(seriesRef HeadSeriesRef, mint, maxt int64
|
|||
bytesWritten += n
|
||||
|
||||
if err := cdm.writeAndAppendToCRC32(cdm.byteBuf[:bytesWritten]); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
if err := cdm.writeAndAppendToCRC32(chk.Bytes()); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
if err := cdm.writeCRC32(); err != nil {
|
||||
return 0, err
|
||||
return err
|
||||
}
|
||||
|
||||
if maxt > cdm.curFileMaxt {
|
||||
cdm.curFileMaxt = maxt
|
||||
}
|
||||
|
||||
cdm.chunkBuffer.put(chkRef, chk)
|
||||
cdm.chunkBuffer.put(ref, chk)
|
||||
|
||||
if len(chk.Bytes())+MaxHeadChunkMetaSize >= cdm.writeBufferSize {
|
||||
// The chunk was bigger than the buffer itself.
|
||||
// Flushing to not keep partial chunks in buffer.
|
||||
if err := cdm.flushBuffer(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
return chkRef, nil
|
||||
}
|
||||
|
||||
// shouldCutNewFile returns whether a new file should be cut, based on time and size retention.
|
||||
// Size retention: because depending on the system architecture, there is a limit on how big of a file we can m-map.
|
||||
// Time retention: so that we can delete old chunks with some time guarantee in low load environments.
|
||||
func (cdm *ChunkDiskMapper) shouldCutNewFile(chunkSize int) bool {
|
||||
return cdm.curFileSize() == 0 || // First head chunk file.
|
||||
cdm.curFileSize()+int64(chunkSize+MaxHeadChunkMetaSize) > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
|
||||
}
|
||||
|
||||
// CutNewFile creates a new m-mapped file.
|
||||
func (cdm *ChunkDiskMapper) CutNewFile() (returnErr error) {
|
||||
cdm.writePathMtx.Lock()
|
||||
defer cdm.writePathMtx.Unlock()
|
||||
|
||||
return cdm.cut()
|
||||
}
|
||||
|
||||
// cut creates a new m-mapped file. The write lock should be held before calling this.
|
||||
func (cdm *ChunkDiskMapper) cut() (returnErr error) {
|
||||
// Sync current tail to disk and close.
|
||||
if err := cdm.finalizeCurFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
n, newFile, seq, err := cutSegmentFile(cdm.dir, MagicHeadChunks, headChunksFormatV1, HeadChunkFilePreallocationSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CutNewFile ensures that a new file will be created the next time a chunk is written.
|
||||
func (cdm *ChunkDiskMapper) CutNewFile() {
|
||||
cdm.evtlPosMtx.Lock()
|
||||
defer cdm.evtlPosMtx.Unlock()
|
||||
|
||||
cdm.evtlPos.cutFileOnNextChunk()
|
||||
}
|
||||
|
||||
func (cdm *ChunkDiskMapper) IsQueueEmpty() bool {
|
||||
return cdm.writeQueue.queueIsEmpty()
|
||||
}
|
||||
|
||||
// cutAndExpectRef creates a new m-mapped file.
// The write lock should be held before calling this.
// It ensures that the position in the new file matches the given chunk reference; if it does not, an error is returned.
func (cdm *ChunkDiskMapper) cutAndExpectRef(chkRef ChunkDiskMapperRef) (err error) {
|
||||
seq, offset, err := cdm.cut()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if expSeq, expOffset := chkRef.Unpack(); seq != expSeq || offset != expOffset {
|
||||
return errors.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cut creates a new m-mapped file. The write lock should be held before calling this.
|
||||
// It returns the file sequence and the offset in that file to start writing chunks.
|
||||
func (cdm *ChunkDiskMapper) cut() (seq, offset int, returnErr error) {
|
||||
// Sync current tail to disk and close.
|
||||
if err := cdm.finalizeCurFile(); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
offset, newFile, seq, err := cutSegmentFile(cdm.dir, MagicHeadChunks, headChunksFormatV1, HeadChunkFilePreallocationSize)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// The file should not be closed if there is no error;
// it's kept open in the ChunkDiskMapper.
|
||||
|
@ -387,7 +514,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
|
|||
}
|
||||
}()
|
||||
|
||||
cdm.curFileNumBytes.Store(int64(n))
|
||||
cdm.curFileOffset.Store(uint64(offset))
|
||||
|
||||
if cdm.curFile != nil {
|
||||
cdm.readPathMtx.Lock()
|
||||
|
@ -397,7 +524,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
|
|||
|
||||
mmapFile, err := fileutil.OpenMmapFileWithSize(newFile.Name(), MaxHeadChunkFileSize)
|
||||
if err != nil {
|
||||
return err
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
cdm.readPathMtx.Lock()
|
||||
|
@ -415,7 +542,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) {
|
|||
|
||||
cdm.curFileMaxt = 0
|
||||
|
||||
return nil
|
||||
return seq, offset, nil
|
||||
}
|
||||
|
||||
// finalizeCurFile writes all pending data to the current tail file,
|
||||
|
@ -438,7 +565,7 @@ func (cdm *ChunkDiskMapper) finalizeCurFile() error {
|
|||
|
||||
func (cdm *ChunkDiskMapper) write(b []byte) error {
|
||||
n, err := cdm.chkWriter.Write(b)
|
||||
cdm.curFileNumBytes.Add(int64(n))
|
||||
cdm.curFileOffset.Add(uint64(n))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -476,6 +603,11 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
|
|||
return nil, ErrChunkDiskMapperClosed
|
||||
}
|
||||
|
||||
chunk := cdm.writeQueue.get(ref)
|
||||
if chunk != nil {
|
||||
return chunk, nil
|
||||
}
|
||||
|
||||
sgmIndex, chkStart := ref.Unpack()
|
||||
// We skip the series ref and the mint/maxt beforehand.
|
||||
chkStart += SeriesRefSize + (2 * MintMaxtSize)
|
||||
|
@ -732,7 +864,10 @@ func (cdm *ChunkDiskMapper) Truncate(mint int64) error {
|
|||
errs := tsdb_errors.NewMulti()
|
||||
// Cut a new file only if the current file has some chunks.
|
||||
if cdm.curFileSize() > HeadChunkFileHeaderSize {
|
||||
errs.Add(cdm.CutNewFile())
|
||||
// There is a known race condition here: between the check of curFileSize() and the call to CutNewFile(),
// a new file could already be cut. This is acceptable because it simply results in an empty file, which
// does no harm.
cdm.CutNewFile()
|
||||
}
|
||||
errs.Add(cdm.deleteFiles(removedFiles))
|
||||
return errs.Err()
|
||||
|
@ -787,13 +922,19 @@ func (cdm *ChunkDiskMapper) Size() (int64, error) {
|
|||
return fileutil.DirSize(cdm.dir.Name())
|
||||
}
|
||||
|
||||
func (cdm *ChunkDiskMapper) curFileSize() int64 {
|
||||
return cdm.curFileNumBytes.Load()
|
||||
func (cdm *ChunkDiskMapper) curFileSize() uint64 {
|
||||
return cdm.curFileOffset.Load()
|
||||
}
|
||||
|
||||
// Close closes all the open files in ChunkDiskMapper.
// It is no longer safe to access chunks from this struct after calling Close.
|
||||
func (cdm *ChunkDiskMapper) Close() error {
|
||||
// Locking the eventual position lock blocks WriteChunk()
|
||||
cdm.evtlPosMtx.Lock()
|
||||
defer cdm.evtlPosMtx.Unlock()
|
||||
|
||||
cdm.writeQueue.stop()
|
||||
|
||||
// 'WriteChunk' locks writePathMtx first and then readPathMtx for cutting head chunk file.
|
||||
// The lock order should not be reversed here else it can cause deadlocks.
|
||||
cdm.writePathMtx.Lock()
|
||||
|
|
|
@ -28,7 +28,7 @@ import (
|
|||
)
|
||||
|
||||
func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
|
||||
hrw := testChunkDiskMapper(t)
|
||||
hrw := createChunkDiskMapper(t, "")
|
||||
defer func() {
|
||||
require.NoError(t, hrw.Close())
|
||||
}()
|
||||
|
@ -129,8 +129,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
|
|||
// Testing IterateAllChunks method.
|
||||
dir := hrw.dir.Name()
|
||||
require.NoError(t, hrw.Close())
|
||||
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
|
||||
require.NoError(t, err)
|
||||
hrw = createChunkDiskMapper(t, dir)
|
||||
|
||||
idx := 0
|
||||
require.NoError(t, hrw.IterateAllChunks(func(seriesRef HeadSeriesRef, chunkRef ChunkDiskMapperRef, mint, maxt int64, numSamples uint16) error {
|
||||
|
@ -156,10 +155,9 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
|
|||
// TestChunkDiskMapper_Truncate tests:
// * That truncation happens properly based on the time passed.
// * That the active file is not deleted even if the passed time makes it eligible for deletion.
// * That an empty current file does not lead to the creation of another file after truncation.
// * That a non-empty current file leads to the creation of another file after truncation.
|
||||
func TestChunkDiskMapper_Truncate(t *testing.T) {
|
||||
hrw := testChunkDiskMapper(t)
|
||||
hrw := createChunkDiskMapper(t, "")
|
||||
defer func() {
|
||||
require.NoError(t, hrw.Close())
|
||||
}()
|
||||
|
@ -167,16 +165,20 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
|
|||
timeRange := 0
|
||||
fileTimeStep := 100
|
||||
var thirdFileMinT, sixthFileMinT int64
|
||||
|
||||
addChunk := func() int {
|
||||
mint := timeRange + 1 // Just after the new file cut.
|
||||
maxt := timeRange + fileTimeStep - 1 // Just before the next file.
|
||||
t.Helper()
|
||||
|
||||
// Write a chunk to set maxt for the segment.
|
||||
_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
|
||||
step := 100
|
||||
mint, maxt := timeRange+1, timeRange+step-1
|
||||
var err error
|
||||
awaitCb := make(chan struct{})
|
||||
hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(cbErr error) {
|
||||
err = cbErr
|
||||
close(awaitCb)
|
||||
})
|
||||
<-awaitCb
|
||||
require.NoError(t, err)
|
||||
|
||||
timeRange += fileTimeStep
|
||||
timeRange += step
|
||||
|
||||
return mint
|
||||
}
|
||||
|
@ -198,7 +200,7 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
|
|||
|
||||
// Create segments 1 to 7.
|
||||
for i := 1; i <= 7; i++ {
|
||||
require.NoError(t, hrw.CutNewFile())
|
||||
hrw.CutNewFile()
|
||||
mint := int64(addChunk())
|
||||
if i == 3 {
|
||||
thirdFileMinT = mint
|
||||
|
@ -210,19 +212,17 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
|
|||
|
||||
// Truncating files.
|
||||
require.NoError(t, hrw.Truncate(thirdFileMinT))
|
||||
|
||||
// Add a chunk to trigger cutting of new file.
|
||||
addChunk()
|
||||
|
||||
verifyFiles([]int{3, 4, 5, 6, 7, 8})
|
||||
|
||||
dir := hrw.dir.Name()
|
||||
require.NoError(t, hrw.Close())
|
||||
|
||||
// Restarted.
|
||||
var err error
|
||||
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.False(t, hrw.fileMaxtSet)
|
||||
require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil }))
|
||||
require.True(t, hrw.fileMaxtSet)
|
||||
hrw = createChunkDiskMapper(t, dir)
|
||||
|
||||
verifyFiles([]int{3, 4, 5, 6, 7, 8})
|
||||
// New file is created after restart even if last file was empty.
|
||||
|
@ -231,15 +231,23 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
|
|||
|
||||
// Truncating files after restart.
|
||||
require.NoError(t, hrw.Truncate(sixthFileMinT))
|
||||
verifyFiles([]int{6, 7, 8, 9, 10})
|
||||
verifyFiles([]int{6, 7, 8, 9})
|
||||
|
||||
// As the last file was empty, this creates no new files.
|
||||
// Truncating a second time without adding a chunk shouldn't create a new file.
|
||||
require.NoError(t, hrw.Truncate(sixthFileMinT+1))
|
||||
verifyFiles([]int{6, 7, 8, 9, 10})
|
||||
verifyFiles([]int{6, 7, 8, 9})
|
||||
|
||||
// Add a chunk to trigger cutting of new file.
|
||||
addChunk()
|
||||
|
||||
verifyFiles([]int{6, 7, 8, 9, 10})
|
||||
|
||||
// Truncating till current time should not delete the current active file.
|
||||
require.NoError(t, hrw.Truncate(int64(timeRange+(2*fileTimeStep))))
|
||||
|
||||
// Add a chunk to trigger cutting of new file.
|
||||
addChunk()
|
||||
|
||||
verifyFiles([]int{10, 11}) // One file is the previously active file and one currently created.
|
||||
}
|
||||
|
||||
|
@ -248,23 +256,40 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
|
|||
// This test exposes https://github.com/prometheus/prometheus/issues/7412 where the truncation
|
||||
// simply deleted all empty files instead of stopping once it encountered a non-empty file.
|
||||
func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
|
||||
hrw := testChunkDiskMapper(t)
|
||||
hrw := createChunkDiskMapper(t, "")
|
||||
defer func() {
|
||||
require.NoError(t, hrw.Close())
|
||||
}()
|
||||
|
||||
timeRange := 0
|
||||
|
||||
addChunk := func() {
|
||||
t.Helper()
|
||||
|
||||
awaitCb := make(chan struct{})
|
||||
|
||||
step := 100
|
||||
mint, maxt := timeRange+1, timeRange+step-1
|
||||
_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
|
||||
hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(err error) {
|
||||
close(awaitCb)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
<-awaitCb
|
||||
timeRange += step
|
||||
}
|
||||
|
||||
emptyFile := func() {
|
||||
require.NoError(t, hrw.CutNewFile())
|
||||
t.Helper()
|
||||
|
||||
_, _, err := hrw.cut()
|
||||
require.NoError(t, err)
|
||||
hrw.evtlPosMtx.Lock()
|
||||
hrw.evtlPos.toNewFile()
|
||||
hrw.evtlPosMtx.Unlock()
|
||||
}
|
||||
|
||||
nonEmptyFile := func() {
|
||||
t.Helper()
|
||||
|
||||
emptyFile()
|
||||
addChunk()
|
||||
}
|
||||
|
@ -297,42 +322,48 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
|
|||
// though files 4 and 6 are empty.
|
||||
file2Maxt := hrw.mmappedChunkFiles[2].maxt
|
||||
require.NoError(t, hrw.Truncate(file2Maxt+1))
|
||||
// As 6 was empty, it should not create another file.
|
||||
verifyFiles([]int{3, 4, 5, 6})
|
||||
|
||||
// Add chunk, so file 6 is not empty anymore.
|
||||
addChunk()
|
||||
// Truncate creates another file as 6 is not empty now.
|
||||
require.NoError(t, hrw.Truncate(file2Maxt+1))
|
||||
verifyFiles([]int{3, 4, 5, 6, 7})
|
||||
verifyFiles([]int{3, 4, 5, 6})
|
||||
|
||||
// Truncating till file 3 should also delete file 4, because it is empty.
|
||||
file3Maxt := hrw.mmappedChunkFiles[3].maxt
|
||||
require.NoError(t, hrw.Truncate(file3Maxt+1))
|
||||
addChunk()
|
||||
verifyFiles([]int{5, 6, 7})
|
||||
|
||||
dir := hrw.dir.Name()
|
||||
require.NoError(t, hrw.Close())
|
||||
|
||||
// Restarting checks for unsequential files.
|
||||
var err error
|
||||
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
|
||||
require.NoError(t, err)
|
||||
verifyFiles([]int{3, 4, 5, 6, 7})
|
||||
hrw = createChunkDiskMapper(t, dir)
|
||||
verifyFiles([]int{5, 6, 7})
|
||||
}
|
||||
|
||||
// TestHeadReadWriter_TruncateAfterIterateChunksError tests for
|
||||
// https://github.com/prometheus/prometheus/issues/7753
|
||||
func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
|
||||
hrw := testChunkDiskMapper(t)
|
||||
hrw := createChunkDiskMapper(t, "")
|
||||
defer func() {
|
||||
require.NoError(t, hrw.Close())
|
||||
}()
|
||||
|
||||
// Write a chunk to iterate over later.
|
||||
_, err := hrw.WriteChunk(1, 0, 1000, randomChunk(t))
|
||||
var err error
|
||||
awaitCb := make(chan struct{})
|
||||
hrw.WriteChunk(1, 0, 1000, randomChunk(t), func(cbErr error) {
|
||||
err = cbErr
|
||||
close(awaitCb)
|
||||
})
|
||||
<-awaitCb
|
||||
require.NoError(t, err)
|
||||
|
||||
dir := hrw.dir.Name()
|
||||
require.NoError(t, hrw.Close())
|
||||
|
||||
// Restarting to recreate https://github.com/prometheus/prometheus/issues/7753.
|
||||
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
|
||||
require.NoError(t, err)
|
||||
hrw = createChunkDiskMapper(t, dir)
|
||||
|
||||
// Forcefully failing IterateAllChunks.
|
||||
require.Error(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error {
|
||||
|
@ -344,21 +375,31 @@ func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
|
||||
hrw := testChunkDiskMapper(t)
|
||||
hrw := createChunkDiskMapper(t, "")
|
||||
defer func() {
|
||||
require.NoError(t, hrw.Close())
|
||||
}()
|
||||
|
||||
timeRange := 0
|
||||
addChunk := func() {
|
||||
t.Helper()
|
||||
|
||||
step := 100
|
||||
mint, maxt := timeRange+1, timeRange+step-1
|
||||
_, err := hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t))
|
||||
var err error
|
||||
awaitCb := make(chan struct{})
|
||||
hrw.WriteChunk(1, int64(mint), int64(maxt), randomChunk(t), func(cbErr error) {
|
||||
err = cbErr
|
||||
close(awaitCb)
|
||||
})
|
||||
<-awaitCb
|
||||
require.NoError(t, err)
|
||||
timeRange += step
|
||||
}
|
||||
nonEmptyFile := func() {
|
||||
require.NoError(t, hrw.CutNewFile())
|
||||
t.Helper()
|
||||
|
||||
hrw.CutNewFile()
|
||||
addChunk()
|
||||
}
|
||||
|
||||
|
@ -388,11 +429,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
|
|||
require.NoError(t, f.Close())
|
||||
|
||||
// Open chunk disk mapper again, corrupt file should be removed.
|
||||
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool(), DefaultWriteBufferSize)
|
||||
require.NoError(t, err)
|
||||
require.False(t, hrw.fileMaxtSet)
|
||||
require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil }))
|
||||
require.True(t, hrw.fileMaxtSet)
|
||||
hrw = createChunkDiskMapper(t, dir)
|
||||
|
||||
// Removed from memory.
|
||||
require.Equal(t, 3, len(hrw.mmappedChunkFiles))
|
||||
|
@ -411,18 +448,22 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {
|
||||
tmpdir, err := ioutil.TempDir("", "data")
|
||||
func createChunkDiskMapper(t *testing.T, dir string) *ChunkDiskMapper {
|
||||
if dir == "" {
|
||||
var err error
|
||||
dir, err = ioutil.TempDir("", "data")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, os.RemoveAll(tmpdir))
|
||||
require.NoError(t, os.RemoveAll(dir))
|
||||
})
|
||||
}
|
||||
|
||||
hrw, err := NewChunkDiskMapper(tmpdir, chunkenc.NewPool(), DefaultWriteBufferSize)
|
||||
hrw, err := NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), DefaultWriteBufferSize, DefaultWriteQueueSize)
|
||||
require.NoError(t, err)
|
||||
require.False(t, hrw.fileMaxtSet)
|
||||
require.NoError(t, hrw.IterateAllChunks(func(_ HeadSeriesRef, _ ChunkDiskMapperRef, _, _ int64, _ uint16) error { return nil }))
|
||||
require.True(t, hrw.fileMaxtSet)
|
||||
|
||||
return hrw
|
||||
}
|
||||
|
||||
|
@ -443,7 +484,11 @@ func createChunk(t *testing.T, idx int, hrw *ChunkDiskMapper) (seriesRef HeadSer
|
|||
mint = int64((idx)*1000 + 1)
|
||||
maxt = int64((idx + 1) * 1000)
|
||||
chunk = randomChunk(t)
|
||||
chunkRef, err = hrw.WriteChunk(seriesRef, mint, maxt, chunk)
|
||||
awaitCb := make(chan struct{})
|
||||
chunkRef = hrw.WriteChunk(seriesRef, mint, maxt, chunk, func(cbErr error) {
|
||||
require.NoError(t, err)
|
||||
close(awaitCb)
|
||||
})
|
||||
<-awaitCb
|
||||
return
|
||||
}
|
||||
|
|
|
@ -18,7 +18,6 @@ import (
|
|||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
@ -347,19 +346,20 @@ func splitByRange(ds []dirMeta, tr int64) [][]dirMeta {
|
|||
}
|
||||
|
||||
// CompactBlockMetas merges many block metas into one, combining its source blocks together
// and adjusting compaction level.
// and adjusting compaction level. The min/max time of the resulting block meta covers all input blocks.
|
||||
func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
|
||||
res := &BlockMeta{
|
||||
ULID: uid,
|
||||
MinTime: blocks[0].MinTime,
|
||||
}
|
||||
|
||||
sources := map[ulid.ULID]struct{}{}
|
||||
// For overlapping blocks, the Maxt can be
|
||||
// in any block so we track it globally.
|
||||
maxt := int64(math.MinInt64)
|
||||
mint := blocks[0].MinTime
|
||||
maxt := blocks[0].MaxTime
|
||||
|
||||
for _, b := range blocks {
|
||||
if b.MinTime < mint {
|
||||
mint = b.MinTime
|
||||
}
|
||||
if b.MaxTime > maxt {
|
||||
maxt = b.MaxTime
|
||||
}
|
||||
|
@ -384,6 +384,7 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
|
|||
return res.Compaction.Sources[i].Compare(res.Compaction.Sources[j]) < 0
|
||||
})
|
||||
|
||||
res.MinTime = mint
|
||||
res.MaxTime = maxt
|
||||
return res
|
||||
}
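The mint/maxt tracking above is the substance of the fix for overlapping blocks: the result must span the extremes of all inputs, not just the first block's MinTime and the last block's MaxTime. A minimal, self-contained sketch of that fold (names and values are illustrative, not from the repository; the same ranges appear in the test further down):

package main

import "fmt"

// timeRange is a stand-in for a block's MinTime/MaxTime pair.
type timeRange struct{ minT, maxT int64 }

// span folds min/max over all blocks, mirroring the loop in CompactBlockMetas.
func span(blocks []timeRange) timeRange {
    out := blocks[0]
    for _, b := range blocks[1:] {
        if b.minT < out.minT {
            out.minT = b.minT
        }
        if b.maxT > out.maxT {
            out.maxT = b.maxT
        }
    }
    return out
}

func main() {
    // Overlapping ranges: the result spans 100..2500.
    fmt.Println(span([]timeRange{{1000, 2000}, {200, 500}, {500, 2500}, {100, 900}}))
}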
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/oklog/ulid"
|
||||
"github.com/pkg/errors"
|
||||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -1311,3 +1312,38 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactBlockMetas(t *testing.T) {
|
||||
parent1 := ulid.MustNew(100, nil)
|
||||
parent2 := ulid.MustNew(200, nil)
|
||||
parent3 := ulid.MustNew(300, nil)
|
||||
parent4 := ulid.MustNew(400, nil)
|
||||
|
||||
input := []*BlockMeta{
|
||||
{ULID: parent1, MinTime: 1000, MaxTime: 2000, Compaction: BlockMetaCompaction{Level: 2, Sources: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(10, nil)}}},
|
||||
{ULID: parent2, MinTime: 200, MaxTime: 500, Compaction: BlockMetaCompaction{Level: 1}},
|
||||
{ULID: parent3, MinTime: 500, MaxTime: 2500, Compaction: BlockMetaCompaction{Level: 3, Sources: []ulid.ULID{ulid.MustNew(5, nil), ulid.MustNew(6, nil)}}},
|
||||
{ULID: parent4, MinTime: 100, MaxTime: 900, Compaction: BlockMetaCompaction{Level: 1}},
|
||||
}
|
||||
|
||||
outUlid := ulid.MustNew(1000, nil)
|
||||
output := CompactBlockMetas(outUlid, input...)
|
||||
|
||||
expected := &BlockMeta{
|
||||
ULID: outUlid,
|
||||
MinTime: 100,
|
||||
MaxTime: 2500,
|
||||
Stats: BlockStats{},
|
||||
Compaction: BlockMetaCompaction{
|
||||
Level: 4,
|
||||
Sources: []ulid.ULID{ulid.MustNew(1, nil), ulid.MustNew(5, nil), ulid.MustNew(6, nil), ulid.MustNew(10, nil)},
|
||||
Parents: []BlockDesc{
|
||||
{ULID: parent1, MinTime: 1000, MaxTime: 2000},
|
||||
{ULID: parent2, MinTime: 200, MaxTime: 500},
|
||||
{ULID: parent3, MinTime: 500, MaxTime: 2500},
|
||||
{ULID: parent4, MinTime: 100, MaxTime: 900},
|
||||
},
|
||||
},
|
||||
}
|
||||
require.Equal(t, expected, output)
|
||||
}
|
||||
|
|
|
@ -80,6 +80,7 @@ func DefaultOptions() *Options {
|
|||
StripeSize: DefaultStripeSize,
|
||||
HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize,
|
||||
IsolationDisabled: defaultIsolationDisabled,
|
||||
HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize,
|
||||
}
|
||||
}
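For users embedding TSDB directly, the new write-queue size sits alongside the existing head-chunk options. A hedged sketch of how it might be set, assuming only the option name introduced in this diff and the Open signature shown in the example test below; the path and the queue value are hypothetical:

package main

import "github.com/prometheus/prometheus/tsdb"

func main() {
    opts := tsdb.DefaultOptions()
    // Hypothetical value; a negative value is reset to chunks.DefaultWriteQueueSize by validateOpts (shown below).
    opts.HeadChunksWriteQueueSize = 1000
    db, err := tsdb.Open("data", nil, nil, opts, nil)
    if err != nil {
        panic(err)
    }
    defer db.Close()
}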
|
||||
|
||||
|
@ -135,6 +136,9 @@ type Options struct {
|
|||
// HeadChunksWriteBufferSize configures the write buffer size used by the head chunks mapper.
|
||||
HeadChunksWriteBufferSize int
|
||||
|
||||
// HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper.
|
||||
HeadChunksWriteQueueSize int
|
||||
|
||||
// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
|
||||
// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
|
||||
SeriesLifecycleCallback SeriesLifecycleCallback
|
||||
|
@ -582,6 +586,9 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
|
|||
if opts.HeadChunksWriteBufferSize <= 0 {
|
||||
opts.HeadChunksWriteBufferSize = chunks.DefaultWriteBufferSize
|
||||
}
|
||||
if opts.HeadChunksWriteQueueSize < 0 {
|
||||
opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize
|
||||
}
|
||||
if opts.MaxBlockChunkSegmentSize <= 0 {
|
||||
opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize
|
||||
}
|
||||
|
@ -704,6 +711,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
|
|||
headOpts.ChunkDirRoot = dir
|
||||
headOpts.ChunkPool = db.chunkPool
|
||||
headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize
|
||||
headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize
|
||||
headOpts.StripeSize = opts.StripeSize
|
||||
headOpts.SeriesCallback = opts.SeriesLifecycleCallback
|
||||
headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
|
||||
|
|
|
@ -212,17 +212,13 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
|
|||
var maxt int64
|
||||
ctx := context.Background()
|
||||
{
|
||||
for {
|
||||
// Appending 121 samples because on the 121st a new chunk will be created.
|
||||
for i := 0; i < 121; i++ {
|
||||
app := db.Appender(ctx)
|
||||
_, err := app.Append(0, labels.FromStrings("foo", "bar"), maxt, 0)
|
||||
expSamples = append(expSamples, sample{t: maxt, v: 0})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
mmapedChunks, err := ioutil.ReadDir(mmappedChunksDir(db.Dir()))
|
||||
require.NoError(t, err)
|
||||
if len(mmapedChunks) > 0 {
|
||||
break
|
||||
}
|
||||
maxt++
|
||||
}
|
||||
require.NoError(t, db.Close())
|
||||
|
@ -1465,6 +1461,10 @@ func TestSizeRetention(t *testing.T) {
|
|||
}
|
||||
require.NoError(t, headApp.Commit())
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return db.Head().chunkDiskMapper.IsQueueEmpty()
|
||||
}, 2*time.Second, 100*time.Millisecond)
|
||||
|
||||
// Test that registered size matches the actual disk size.
|
||||
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
|
||||
require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
|
||||
|
@ -2453,7 +2453,7 @@ func TestDBReadOnly_FlushWAL(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
defer func() { require.NoError(t, db.Close()) }()
|
||||
require.NoError(t, db.Close())
|
||||
}
|
||||
|
||||
// Flush WAL.
|
||||
|
|
|
@ -16,97 +16,89 @@ package tsdb
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"testing"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
func TestExample(t *testing.T) {
|
||||
func Example() {
|
||||
// Create a random dir to work in. Open() doesn't require a pre-existing dir, but
|
||||
// we want to make sure not to make a mess where we shouldn't.
|
||||
dir := t.TempDir()
|
||||
dir, err := ioutil.TempDir("", "tsdb-test")
|
||||
noErr(err)
|
||||
|
||||
// Open a TSDB for reading and/or writing.
|
||||
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
|
||||
require.NoError(t, err)
|
||||
noErr(err)
|
||||
|
||||
// Open an appender for writing.
|
||||
app := db.Appender(context.Background())
|
||||
|
||||
lbls := labels.FromStrings("foo", "bar")
|
||||
var appendedSamples []sample
|
||||
series := labels.FromStrings("foo", "bar")
|
||||
|
||||
// Ref is 0 for the first append since we don't know the reference for the series.
|
||||
ts, v := time.Now().Unix(), 123.0
|
||||
ref, err := app.Append(0, lbls, ts, v)
|
||||
require.NoError(t, err)
|
||||
appendedSamples = append(appendedSamples, sample{ts, v})
|
||||
ref, err := app.Append(0, series, time.Now().Unix(), 123)
|
||||
noErr(err)
|
||||
|
||||
// Another append for a second later.
|
||||
// Re-using the ref from above, since it's the same series, makes the append faster.
|
||||
time.Sleep(time.Second)
|
||||
ts, v = time.Now().Unix(), 124
|
||||
_, err = app.Append(ref, lbls, ts, v)
|
||||
require.NoError(t, err)
|
||||
appendedSamples = append(appendedSamples, sample{ts, v})
|
||||
_, err = app.Append(ref, series, time.Now().Unix(), 124)
|
||||
noErr(err)
|
||||
|
||||
// Commit to storage.
|
||||
err = app.Commit()
|
||||
require.NoError(t, err)
|
||||
noErr(err)
|
||||
|
||||
// In case you want to do more appends after app.Commit(),
|
||||
// you need a new appender.
|
||||
// app = db.Appender(context.Background())
|
||||
//
|
||||
app = db.Appender(context.Background())
|
||||
// ... adding more samples.
|
||||
//
|
||||
// Commit to storage.
|
||||
// err = app.Commit()
|
||||
// require.NoError(t, err)
|
||||
|
||||
// Open a querier for reading.
|
||||
querier, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
|
||||
require.NoError(t, err)
|
||||
|
||||
noErr(err)
|
||||
ss := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"))
|
||||
var queriedSamples []sample
|
||||
|
||||
for ss.Next() {
|
||||
series := ss.At()
|
||||
fmt.Println("series:", series.Labels().String())
|
||||
|
||||
it := series.Iterator()
|
||||
for it.Next() {
|
||||
ts, v := it.At()
|
||||
fmt.Println("sample", ts, v)
|
||||
queriedSamples = append(queriedSamples, sample{ts, v})
|
||||
_, v := it.At() // We ignore the timestamp here so that we have a predictable output to test against (below).
|
||||
fmt.Println("sample", v)
|
||||
}
|
||||
|
||||
require.NoError(t, it.Err())
|
||||
fmt.Println("it.Err():", it.Err())
|
||||
}
|
||||
require.NoError(t, ss.Err())
|
||||
fmt.Println("ss.Err():", ss.Err())
|
||||
ws := ss.Warnings()
|
||||
if len(ws) > 0 {
|
||||
fmt.Println("warnings:", ws)
|
||||
}
|
||||
err = querier.Close()
|
||||
require.NoError(t, err)
|
||||
noErr(err)
|
||||
|
||||
// Clean up any last resources when done.
|
||||
err = db.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, appendedSamples, queriedSamples)
|
||||
noErr(err)
|
||||
err = os.RemoveAll(dir)
|
||||
noErr(err)
|
||||
|
||||
// Output:
|
||||
// series: {foo="bar"}
|
||||
// sample <ts1> 123
|
||||
// sample <ts2> 124
|
||||
// sample 123
|
||||
// sample 124
|
||||
// it.Err(): <nil>
|
||||
// ss.Err(): <nil>
|
||||
}
|
||||
|
||||
func noErr(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
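Converting TestExample into a runnable Example with an `// Output:` block means `go test` now compares the printed lines against that block, which is why the timestamps are dropped from the printed samples. A minimal sketch of the mechanism, independent of this repository:

package sketch_test

import "fmt"

// go test runs Example functions and compares what they print
// against the trailing Output comment.
func Example_sketch() {
    fmt.Println("hello")
    // Output:
    // hello
}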
|
||||
|
|
|
@ -27,8 +27,12 @@ import (
|
|||
"github.com/prometheus/prometheus/storage"
|
||||
)
|
||||
|
||||
// Indicates that there is no index entry for an exemplar.
const noExemplar = -1
const (
    // Indicates that there is no index entry for an exemplar.
    noExemplar = -1
    // Estimated number of exemplars per series, for sizing the index.
    estimatedExemplarsPerSeries = 16
)
|
||||
|
||||
type CircularExemplarStorage struct {
|
||||
lock sync.RWMutex
|
||||
|
@ -117,7 +121,7 @@ func NewCircularExemplarStorage(len int64, m *ExemplarMetrics) (ExemplarStorage,
|
|||
}
|
||||
c := &CircularExemplarStorage{
|
||||
exemplars: make([]*circularBufferEntry, len),
|
||||
index: make(map[string]*indexEntry),
|
||||
index: make(map[string]*indexEntry, len/estimatedExemplarsPerSeries),
|
||||
metrics: m,
|
||||
}
|
||||
|
||||
|
@ -202,7 +206,8 @@ Outer:
|
|||
}
|
||||
|
||||
func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error {
|
||||
seriesLabels := l.String()
|
||||
var buf [1024]byte
|
||||
seriesLabels := l.Bytes(buf[:])
|
||||
|
||||
// TODO(bwplotka): This lock can lock all scrapers; there might be high contention on this at scale.
// Optimize by moving the lock to be per series (& benchmark it).
|
||||
|
@ -213,7 +218,7 @@ func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.
|
|||
|
||||
// Not thread safe. The append parameter tells us whether this is an external validation or an internal one
// as a result of an AddExemplar call, in which case we should update any relevant metrics.
|
||||
func (ce *CircularExemplarStorage) validateExemplar(l string, e exemplar.Exemplar, append bool) error {
|
||||
func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemplar, append bool) error {
|
||||
if len(ce.exemplars) <= 0 {
|
||||
return storage.ErrExemplarsDisabled
|
||||
}
|
||||
|
@ -230,7 +235,7 @@ func (ce *CircularExemplarStorage) validateExemplar(l string, e exemplar.Exempla
|
|||
}
|
||||
}
|
||||
|
||||
idx, ok := ce.index[l]
|
||||
idx, ok := ce.index[string(key)]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
@ -269,7 +274,7 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
|
|||
oldNextIndex := int64(ce.nextIndex)
|
||||
|
||||
ce.exemplars = make([]*circularBufferEntry, l)
|
||||
ce.index = make(map[string]*indexEntry)
|
||||
ce.index = make(map[string]*indexEntry, l/estimatedExemplarsPerSeries)
|
||||
ce.nextIndex = 0
|
||||
|
||||
// Replay as many entries as needed, starting with oldest first.
|
||||
|
@ -305,13 +310,14 @@ func (ce *CircularExemplarStorage) Resize(l int64) int {
|
|||
// migrate is like AddExemplar but reuses existing structs. Expected to be called in batch; it requires an
// external lock and does not compute metrics.
|
||||
func (ce *CircularExemplarStorage) migrate(entry *circularBufferEntry) {
|
||||
seriesLabels := entry.ref.seriesLabels.String()
|
||||
var buf [1024]byte
|
||||
seriesLabels := entry.ref.seriesLabels.Bytes(buf[:])
|
||||
|
||||
idx, ok := ce.index[seriesLabels]
|
||||
idx, ok := ce.index[string(seriesLabels)]
|
||||
if !ok {
|
||||
idx = entry.ref
|
||||
idx.oldest = ce.nextIndex
|
||||
ce.index[seriesLabels] = idx
|
||||
ce.index[string(seriesLabels)] = idx
|
||||
} else {
|
||||
entry.ref = idx
|
||||
ce.exemplars[idx.newest].next = ce.nextIndex
|
||||
|
@ -329,7 +335,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
|
|||
return storage.ErrExemplarsDisabled
|
||||
}
|
||||
|
||||
seriesLabels := l.String()
|
||||
var buf [1024]byte
|
||||
seriesLabels := l.Bytes(buf[:])
|
||||
|
||||
// TODO(bwplotka): This lock can lock all scrapers; there might be high contention on this at scale.
// Optimize by moving the lock to be per series (& benchmark it).
|
||||
|
@ -345,11 +352,11 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
|
|||
return err
|
||||
}
|
||||
|
||||
_, ok := ce.index[seriesLabels]
|
||||
_, ok := ce.index[string(seriesLabels)]
|
||||
if !ok {
|
||||
ce.index[seriesLabels] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
|
||||
ce.index[string(seriesLabels)] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
|
||||
} else {
|
||||
ce.exemplars[ce.index[seriesLabels].newest].next = ce.nextIndex
|
||||
ce.exemplars[ce.index[string(seriesLabels)].newest].next = ce.nextIndex
|
||||
}
|
||||
|
||||
if prev := ce.exemplars[ce.nextIndex]; prev == nil {
|
||||
|
@ -357,12 +364,13 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
|
|||
} else {
|
||||
// An exemplar already exists at this ce.nextIndex entry; drop it to make room
// for others.
|
||||
prevLabels := prev.ref.seriesLabels.String()
|
||||
var buf [1024]byte
|
||||
prevLabels := prev.ref.seriesLabels.Bytes(buf[:])
|
||||
if prev.next == noExemplar {
|
||||
// Last item for this series, remove index entry.
|
||||
delete(ce.index, prevLabels)
|
||||
delete(ce.index, string(prevLabels))
|
||||
} else {
|
||||
ce.index[prevLabels].oldest = prev.next
|
||||
ce.index[string(prevLabels)].oldest = prev.next
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -370,8 +378,8 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
|
|||
// since this is the first exemplar stored for this series.
|
||||
ce.exemplars[ce.nextIndex].next = noExemplar
|
||||
ce.exemplars[ce.nextIndex].exemplar = e
|
||||
ce.exemplars[ce.nextIndex].ref = ce.index[seriesLabels]
|
||||
ce.index[seriesLabels].newest = ce.nextIndex
|
||||
ce.exemplars[ce.nextIndex].ref = ce.index[string(seriesLabels)]
|
||||
ce.index[string(seriesLabels)].newest = ce.nextIndex
|
||||
|
||||
ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
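The switch from l.String() keys to l.Bytes(buf[:]) together with lookups written as ce.index[string(key)] leans on a Go compiler optimization: converting a []byte to string directly inside a map index expression does not allocate, so the hot path avoids building a string per exemplar. A minimal sketch of the pattern (names and values are illustrative, not from the repository):

package main

import "fmt"

func main() {
    index := map[string]int{"series": 1}

    key := []byte("series")
    // The string(key) conversion inside a map index expression is recognized
    // by the compiler and does not allocate a new string for the lookup.
    fmt.Println(index[string(key)])

    // Storing under the key still materializes a string key, which does allocate.
    index[string(key)] = 2
    fmt.Println(index["series"])
}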
|
||||
|
||||
|
|
|
@ -112,7 +112,7 @@ func TestAddExemplar(t *testing.T) {
|
|||
}
|
||||
|
||||
require.NoError(t, es.AddExemplar(l, e))
|
||||
require.Equal(t, es.index[l.String()].newest, 0, "exemplar was not stored correctly")
|
||||
require.Equal(t, es.index[string(l.Bytes(nil))].newest, 0, "exemplar was not stored correctly")
|
||||
|
||||
e2 := exemplar.Exemplar{
|
||||
Labels: labels.Labels{
|
||||
|
@ -126,8 +126,8 @@ func TestAddExemplar(t *testing.T) {
|
|||
}
|
||||
|
||||
require.NoError(t, es.AddExemplar(l, e2))
|
||||
require.Equal(t, es.index[l.String()].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
|
||||
require.True(t, es.exemplars[es.index[l.String()].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[l.String()].newest].exemplar)
|
||||
require.Equal(t, es.index[string(l.Bytes(nil))].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
|
||||
require.True(t, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[string(l.Bytes(nil))].newest].exemplar)
|
||||
|
||||
require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar")
|
||||
|
||||
|
@ -300,7 +300,7 @@ func TestSelectExemplar_TimeRange(t *testing.T) {
|
|||
Ts: int64(101 + i),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, es.index[l.String()].newest, i, "exemplar was not stored correctly")
|
||||
require.Equal(t, es.index[string(l.Bytes(nil))].newest, i, "exemplar was not stored correctly")
|
||||
}
|
||||
|
||||
m, err := labels.NewMatcher(labels.MatchEqual, l[0].Name, l[0].Value)
|
||||
|
@ -376,14 +376,14 @@ func TestIndexOverwrite(t *testing.T) {
|
|||
|
||||
// Ensure index GC'ing is taking place; there should no longer be any
// index entry for series l1 since we just wrote two exemplars for series l2.
|
||||
_, ok := es.index[l1.String()]
|
||||
_, ok := es.index[string(l1.Bytes(nil))]
|
||||
require.False(t, ok)
|
||||
require.Equal(t, &indexEntry{1, 0, l2}, es.index[l2.String()])
|
||||
require.Equal(t, &indexEntry{1, 0, l2}, es.index[string(l2.Bytes(nil))])
|
||||
|
||||
err = es.AddExemplar(l1, exemplar.Exemplar{Value: 4, Ts: 4})
|
||||
require.NoError(t, err)
|
||||
|
||||
i := es.index[l2.String()]
|
||||
i := es.index[string(l2.Bytes(nil))]
|
||||
require.Equal(t, &indexEntry{0, 0, l2}, i)
|
||||
}
|
||||
|
||||
|
@ -492,19 +492,24 @@ func BenchmarkAddExemplar(b *testing.B) {
|
|||
|
||||
for _, n := range []int{10000, 100000, 1000000} {
|
||||
b.Run(fmt.Sprintf("%d", n), func(b *testing.B) {
|
||||
for j := 0; j < b.N; j++ {
|
||||
b.StopTimer()
|
||||
exs, err := NewCircularExemplarStorage(int64(n), eMetrics)
|
||||
require.NoError(b, err)
|
||||
es := exs.(*CircularExemplarStorage)
|
||||
|
||||
b.ResetTimer()
|
||||
l := labels.Labels{{Name: "service", Value: strconv.Itoa(0)}}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
if i%100 == 0 {
|
||||
l = labels.Labels{{Name: "service", Value: strconv.Itoa(i)}}
|
||||
}
|
||||
err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i), Labels: exLabels})
|
||||
if err != nil {
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -543,6 +548,9 @@ func BenchmarkResizeExemplars(b *testing.B) {
|
|||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b.Run(fmt.Sprintf("%s-%d-to-%d", tc.name, tc.startSize, tc.endSize), func(b *testing.B) {
|
||||
for j := 0; j < b.N; j++ {
|
||||
b.StopTimer()
|
||||
exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
|
||||
require.NoError(b, err)
|
||||
es := exs.(*CircularExemplarStorage)
|
||||
|
@ -551,16 +559,13 @@ func BenchmarkResizeExemplars(b *testing.B) {
|
|||
l := labels.FromStrings("service", strconv.Itoa(i))
|
||||
|
||||
err = es.AddExemplar(l, exemplar.Exemplar{Value: float64(i), Ts: int64(i)})
|
||||
if err != nil {
|
||||
require.NoError(b, err)
|
||||
}
|
||||
saveIndex := es.index
|
||||
saveExemplars := es.exemplars
|
||||
|
||||
b.Run(fmt.Sprintf("%s-%d-to-%d", tc.name, tc.startSize, tc.endSize), func(t *testing.B) {
|
||||
es.index = saveIndex
|
||||
es.exemplars = saveExemplars
|
||||
b.ResetTimer()
|
||||
}
|
||||
b.StartTimer()
|
||||
es.Resize(tc.endSize)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -127,6 +127,8 @@ type HeadOptions struct {
|
|||
ChunkDirRoot string
|
||||
ChunkPool chunkenc.Pool
|
||||
ChunkWriteBufferSize int
|
||||
ChunkWriteQueueSize int
|
||||
|
||||
// StripeSize sets the number of entries in the hash map, it must be a power of 2.
|
||||
// A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series.
|
||||
// A smaller StripeSize reduces the memory allocated, but can decrease performance with large number of series.
|
||||
|
@ -144,6 +146,7 @@ func DefaultHeadOptions() *HeadOptions {
|
|||
ChunkDirRoot: "",
|
||||
ChunkPool: chunkenc.NewPool(),
|
||||
ChunkWriteBufferSize: chunks.DefaultWriteBufferSize,
|
||||
ChunkWriteQueueSize: chunks.DefaultWriteQueueSize,
|
||||
StripeSize: DefaultStripeSize,
|
||||
SeriesCallback: &noopSeriesLifecycleCallback{},
|
||||
IsolationDisabled: defaultIsolationDisabled,
|
||||
|
@ -208,9 +211,11 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti
|
|||
}
|
||||
|
||||
h.chunkDiskMapper, err = chunks.NewChunkDiskMapper(
|
||||
r,
|
||||
mmappedChunksDir(opts.ChunkDirRoot),
|
||||
opts.ChunkPool,
|
||||
opts.ChunkWriteBufferSize,
|
||||
opts.ChunkWriteQueueSize,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -571,12 +571,7 @@ func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper
|
|||
return
|
||||
}
|
||||
|
||||
chunkRef, err := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk)
|
||||
if err != nil {
|
||||
if err != chunks.ErrChunkDiskMapperClosed {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk, handleChunkWriteError)
|
||||
s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{
|
||||
ref: chunkRef,
|
||||
numSamples: uint16(s.headChunk.chunk.NumSamples()),
|
||||
|
@ -585,6 +580,12 @@ func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper
|
|||
})
|
||||
}
|
||||
|
||||
func handleChunkWriteError(err error) {
    if err != nil && err != chunks.ErrChunkDiskMapperClosed {
        panic(err)
    }
}
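With the chunk write queue, WriteChunk now returns the chunk reference immediately and reports failure through the callback, which is why mmapCurrentHeadChunk passes handleChunkWriteError above. Callers that need to wait for the write, as the tests in this diff do, can synchronize on a channel; a hedged sketch of that pattern (the cdm, seriesRef, mint, maxt, and chk variables are assumed to exist in scope):

errCh := make(chan error, 1)
ref := cdm.WriteChunk(seriesRef, mint, maxt, chk, func(err error) {
    errCh <- err // Buffered channel, so the callback never blocks the write queue.
})
if err := <-errCh; err != nil {
    // The chunk could not be written to the head chunk file.
    panic(err)
}
_ = ref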
|
||||
|
||||
// Rollback removes the samples and exemplars from headAppender and writes any series to WAL.
|
||||
func (a *headAppender) Rollback() (err error) {
|
||||
if a.closed {
|
||||
|
|
|
@ -103,11 +103,18 @@ func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, err
|
|||
|
||||
// Postings returns the postings list iterator for the label pairs.
func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) {
    switch len(values) {
    case 0:
        return index.EmptyPostings(), nil
    case 1:
        return h.head.postings.Get(name, values[0]), nil
    default:
        res := make([]index.Postings, 0, len(values))
        for _, value := range values {
            res = append(res, h.head.postings.Get(name, value))
        }
        return index.Merge(res...), nil
    }
}
|
||||
|
||||
func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
|
||||
|
|
|
@ -35,6 +35,7 @@ import (
|
|||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/atomic"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
|
@ -224,7 +225,7 @@ func BenchmarkLoadWAL(b *testing.B) {
|
|||
|
||||
// Write mmapped chunks.
|
||||
if c.mmappedChunkT != 0 {
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize)
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, mmappedChunksDir(dir), chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
|
||||
require.NoError(b, err)
|
||||
for k := 0; k < c.batches*c.seriesPerBatch; k++ {
|
||||
// Create one mmapped chunk per series, with one sample at the given time.
|
||||
|
@ -270,6 +271,195 @@ func BenchmarkLoadWAL(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples.
// In total it generates 4000 chunks, because with a step of 15s there are 4 chunks per block per series.
// While appending the samples to the head, it concurrently queries them from multiple goroutines and verifies that the
// returned results are correct.
|
||||
func TestHead_HighConcurrencyReadAndWrite(t *testing.T) {
|
||||
head, _ := newTestHead(t, DefaultBlockDuration, false)
|
||||
defer func() {
|
||||
require.NoError(t, head.Close())
|
||||
}()
|
||||
|
||||
seriesCnt := 1000
|
||||
readConcurrency := 2
|
||||
writeConcurrency := 10
|
||||
startTs := uint64(DefaultBlockDuration) // start at the second block relative to the unix epoch.
|
||||
qryRange := uint64(5 * time.Minute.Milliseconds())
|
||||
step := uint64(15 * time.Second / time.Millisecond)
|
||||
endTs := startTs + uint64(DefaultBlockDuration)
|
||||
|
||||
labelSets := make([]labels.Labels, seriesCnt)
|
||||
for i := 0; i < seriesCnt; i++ {
|
||||
labelSets[i] = labels.FromStrings("seriesId", strconv.Itoa(i))
|
||||
}
|
||||
|
||||
head.Init(0)
|
||||
|
||||
g, ctx := errgroup.WithContext(context.Background())
|
||||
whileNotCanceled := func(f func() (bool, error)) error {
|
||||
for ctx.Err() == nil {
|
||||
cont, err := f()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !cont {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create one channel for each write worker; the channels will be used by the coordinator
// goroutine to tell each write worker which timestamps it has to write.
|
||||
writerTsCh := make([]chan uint64, writeConcurrency)
|
||||
for writerTsChIdx := range writerTsCh {
|
||||
writerTsCh[writerTsChIdx] = make(chan uint64)
|
||||
}
|
||||
|
||||
// workerReadyWg is used to synchronize the start of the test;
// we only start the test once all workers signal that they're ready.
|
||||
var workerReadyWg sync.WaitGroup
|
||||
workerReadyWg.Add(writeConcurrency + readConcurrency)
|
||||
|
||||
// Start the write workers.
|
||||
for wid := 0; wid < writeConcurrency; wid++ {
|
||||
// Create copy of workerID to be used by worker routine.
|
||||
workerID := wid
|
||||
|
||||
g.Go(func() error {
|
||||
// The label sets which this worker will write.
|
||||
workerLabelSets := labelSets[(seriesCnt/writeConcurrency)*workerID : (seriesCnt/writeConcurrency)*(workerID+1)]
|
||||
|
||||
// Signal that this worker is ready.
|
||||
workerReadyWg.Done()
|
||||
|
||||
return whileNotCanceled(func() (bool, error) {
|
||||
ts, ok := <-writerTsCh[workerID]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
app := head.Appender(ctx)
|
||||
for i := 0; i < len(workerLabelSets); i++ {
|
||||
// We also use the timestamp as the sample value.
|
||||
_, err := app.Append(0, workerLabelSets[i], int64(ts), float64(ts))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error when appending to head: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return true, app.Commit()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// queryHead is a helper to query the head for a given time range and labelset.
|
||||
queryHead := func(mint, maxt uint64, label labels.Label) (map[string][]tsdbutil.Sample, error) {
|
||||
q, err := NewBlockQuerier(head, int64(mint), int64(maxt))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return query(t, q, labels.MustNewMatcher(labels.MatchEqual, label.Name, label.Value)), nil
|
||||
}
|
||||
|
||||
// readerTsCh will be used by the coordinator goroutine to coordinate which timestamps the readers should read.
|
||||
readerTsCh := make(chan uint64)
|
||||
|
||||
// Start the read workers.
|
||||
for wid := 0; wid < readConcurrency; wid++ {
|
||||
// Create copy of workerID to be used by the worker goroutine.
|
||||
workerID := wid
|
||||
|
||||
g.Go(func() error {
|
||||
querySeriesRef := (seriesCnt / readConcurrency) * workerID
|
||||
|
||||
// Signal that this worker is ready.
|
||||
workerReadyWg.Done()
|
||||
|
||||
return whileNotCanceled(func() (bool, error) {
|
||||
ts, ok := <-readerTsCh
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
querySeriesRef = (querySeriesRef + 1) % seriesCnt
|
||||
lbls := labelSets[querySeriesRef]
|
||||
samples, err := queryHead(ts-qryRange, ts, lbls[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(samples) != 1 {
|
||||
return false, fmt.Errorf("expected 1 series, got %d", len(samples))
|
||||
}
|
||||
|
||||
series := lbls.String()
|
||||
expectSampleCnt := qryRange/step + 1
|
||||
if expectSampleCnt != uint64(len(samples[series])) {
|
||||
return false, fmt.Errorf("expected %d samples, got %d", expectSampleCnt, len(samples[series]))
|
||||
}
|
||||
|
||||
for sampleIdx, sample := range samples[series] {
|
||||
expectedValue := ts - qryRange + (uint64(sampleIdx) * step)
|
||||
if sample.T() != int64(expectedValue) {
|
||||
return false, fmt.Errorf("expected sample %d to have ts %d, got %d", sampleIdx, expectedValue, sample.T())
|
||||
}
|
||||
if sample.V() != float64(expectedValue) {
|
||||
return false, fmt.Errorf("expected sample %d to have value %d, got %f", sampleIdx, expectedValue, sample.V())
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Start the coordinator goroutine.
|
||||
g.Go(func() error {
|
||||
currTs := startTs
|
||||
|
||||
defer func() {
|
||||
// End of the test, close all channels to stop the workers.
|
||||
for _, ch := range writerTsCh {
|
||||
close(ch)
|
||||
}
|
||||
close(readerTsCh)
|
||||
}()
|
||||
|
||||
// Wait until all workers are ready to start the test.
|
||||
workerReadyWg.Wait()
|
||||
return whileNotCanceled(func() (bool, error) {
|
||||
// Send the current timestamp to each of the writers.
|
||||
for _, ch := range writerTsCh {
|
||||
select {
|
||||
case ch <- currTs:
|
||||
case <-ctx.Done():
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Once data for at least <qryRange> has been ingested, send the current timestamp to the readers.
|
||||
if currTs > startTs+qryRange {
|
||||
select {
|
||||
case readerTsCh <- currTs - step:
|
||||
case <-ctx.Done():
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
currTs += step
|
||||
if currTs > endTs {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
})
|
||||
|
||||
require.NoError(t, g.Wait())
|
||||
}
|
||||
|
||||
func TestHead_ReadWAL(t *testing.T) {
|
||||
for _, compress := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
|
||||
|
@ -540,7 +730,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
|
|||
require.NoError(t, os.RemoveAll(dir))
|
||||
}()
|
||||
// This is usually taken from the Head, but passing manually here.
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize)
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, chunkDiskMapper.Close())
|
||||
|
@ -1084,7 +1274,7 @@ func TestMemSeries_append(t *testing.T) {
|
|||
require.NoError(t, os.RemoveAll(dir))
|
||||
}()
|
||||
// This is usually taken from the Head, but passing manually here.
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize)
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, chunkDiskMapper.Close())
|
||||
|
@ -1462,14 +1652,16 @@ func TestHeadReadWriterRepair(t *testing.T) {
|
|||
ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper)
|
||||
require.True(t, ok, "series append failed")
|
||||
require.False(t, chunkCreated, "chunk was created")
|
||||
require.NoError(t, h.chunkDiskMapper.CutNewFile())
|
||||
h.chunkDiskMapper.CutNewFile()
|
||||
}
|
||||
require.NoError(t, h.Close())
|
||||
|
||||
// Verify that there are 7 segment files.
|
||||
// Verify that there are 6 segment files.
|
||||
// It should only be 6 because the last call to .CutNewFile() won't
|
||||
// take effect without another chunk being written.
|
||||
files, err := ioutil.ReadDir(mmappedChunksDir(dir))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 7, len(files))
|
||||
require.Equal(t, 6, len(files))
|
||||
|
||||
// Corrupt the 4th file by writing a random byte to series ref.
|
||||
f, err := os.OpenFile(filepath.Join(mmappedChunksDir(dir), files[3].Name()), os.O_WRONLY, 0o666)
|
||||
|
@ -2270,7 +2462,7 @@ func TestMemSafeIteratorSeekIntoBuffer(t *testing.T) {
|
|||
require.NoError(t, os.RemoveAll(dir))
|
||||
}()
|
||||
// This is usually taken from the Head, but passing manually here.
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize)
|
||||
chunkDiskMapper, err := chunks.NewChunkDiskMapper(nil, dir, chunkenc.NewPool(), chunks.DefaultWriteBufferSize, chunks.DefaultWriteQueueSize)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, chunkDiskMapper.Close())
|
||||
|
|
|
@ -20,6 +20,8 @@ import (
|
|||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
)
|
||||
|
@ -831,3 +833,121 @@ type seriesRefSlice []storage.SeriesRef
|
|||
func (x seriesRefSlice) Len() int { return len(x) }
|
||||
func (x seriesRefSlice) Less(i, j int) bool { return x[i] < x[j] }
|
||||
func (x seriesRefSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
// FindIntersectingPostings checks the intersection of p and candidates[i] for each i in candidates;
// if the intersection is non-empty, then i is added to the returned indexes.
// Returned indexes are not sorted.
|
||||
func FindIntersectingPostings(p Postings, candidates []Postings) (indexes []int, err error) {
|
||||
h := make(postingsWithIndexHeap, 0, len(candidates))
|
||||
for idx, it := range candidates {
|
||||
if it.Next() {
|
||||
h = append(h, postingsWithIndex{index: idx, p: it})
|
||||
} else if it.Err() != nil {
|
||||
return nil, it.Err()
|
||||
}
|
||||
}
|
||||
if h.empty() {
|
||||
return nil, nil
|
||||
}
|
||||
heap.Init(&h)
|
||||
|
||||
for !h.empty() {
|
||||
if !p.Seek(h.at()) {
|
||||
return indexes, p.Err()
|
||||
}
|
||||
if p.At() == h.at() {
|
||||
indexes = append(indexes, h.popIndex())
|
||||
} else if err := h.next(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return indexes, nil
|
||||
}
|
||||
|
||||
// postingsWithIndex is used as postingsWithIndexHeap elements by FindIntersectingPostings,
|
||||
// keeping track of the original index of each postings while they move inside the heap.
|
||||
type postingsWithIndex struct {
|
||||
index int
|
||||
p Postings
|
||||
// popped means that these postings shouldn't be considered anymore.
|
||||
// See popIndex() comment to understand why we need this.
|
||||
popped bool
|
||||
}
|
||||
|
||||
// postingsWithIndexHeap implements heap.Interface,
|
||||
// with root always pointing to the postings with minimum Postings.At() value.
|
||||
// It also implements a special way of removing elements that marks them as popped and moves them to the bottom of the
|
||||
// heap instead of actually removing them, see popIndex() for more details.
|
||||
type postingsWithIndexHeap []postingsWithIndex
|
||||
|
||||
// empty checks whether the heap is empty, which is true if it has no elements, or if the smallest element is popped.
|
||||
func (h *postingsWithIndexHeap) empty() bool {
|
||||
return len(*h) == 0 || (*h)[0].popped
|
||||
}
|
||||
|
||||
// popIndex pops the smallest heap element and returns its index.
|
||||
// In our implementation we don't actually do heap.Pop(), instead we mark the element as `popped` and fix its position, which
|
||||
// should be after all the non-popped elements according to our sorting strategy.
|
||||
// By skipping the `heap.Pop()` call we avoid an extra allocation in this heap's Pop() implementation which returns an interface{}.
|
||||
func (h *postingsWithIndexHeap) popIndex() int {
|
||||
index := (*h)[0].index
|
||||
(*h)[0].popped = true
|
||||
heap.Fix(h, 0)
|
||||
return index
|
||||
}
|
||||
|
||||
// at provides the storage.SeriesRef where root Postings is pointing at this moment.
|
||||
func (h postingsWithIndexHeap) at() storage.SeriesRef { return h[0].p.At() }
|
||||
|
||||
// next performs the Postings.Next() operation on the root of the heap, performing the related operation on the heap
// and conveniently returning the result of calling Postings.Err() if the result of calling Next() was false.
// If Next() succeeds, the heap is fixed to move the root to its new position, according to its Postings.At() value.
// If Next() fails and there's no error reported by Postings.Err(), then the root is marked as popped and the heap is fixed.
|
||||
func (h *postingsWithIndexHeap) next() error {
|
||||
pi := (*h)[0]
|
||||
next := pi.p.Next()
|
||||
if next {
|
||||
heap.Fix(h, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := pi.p.Err(); err != nil {
|
||||
return errors.Wrapf(err, "postings %d", pi.index)
|
||||
}
|
||||
h.popIndex()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Len implements heap.Interface.
|
||||
// Notice that Len() > 0 does not imply that heap is not empty as elements are not removed from this heap.
|
||||
// Use empty() to check whether heap is empty or not.
|
||||
func (h postingsWithIndexHeap) Len() int { return len(h) }
|
||||
|
||||
// Less implements heap.Interface, it puts all the popped elements at the bottom,
|
||||
// and then sorts by Postings.At() property of each node.
|
||||
func (h postingsWithIndexHeap) Less(i, j int) bool {
|
||||
if h[i].popped != h[j].popped {
|
||||
return h[j].popped
|
||||
}
|
||||
return h[i].p.At() < h[j].p.At()
|
||||
}
|
||||
|
||||
// Swap implements heap.Interface.
|
||||
func (h *postingsWithIndexHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
|
||||
|
||||
// Push implements heap.Interface.
|
||||
func (h *postingsWithIndexHeap) Push(x interface{}) {
|
||||
*h = append(*h, x.(postingsWithIndex))
|
||||
}
|
||||
|
||||
// Pop implements heap.Interface and pops the last element, which is NOT the min element,
// so this doesn't return the same element as heap.Pop() would.
// Although this method is implemented for correctness, we don't expect it to be used; see popIndex() for details.
|
||||
func (h *postingsWithIndexHeap) Pop() interface{} {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
*h = old[0 : n-1]
|
||||
return x
|
||||
}
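Not part of the diff itself: the sketch below illustrates, under stated assumptions, how a consumer such as the FindIntersectingPostings function exercised by the new tests could drive this heap. It is written against the Postings interface used above (Next/Seek/At/Err) and is only an illustration of the intended usage pattern, not the implementation introduced by this change; the function name and exact shape are assumptions made for the example.

// Sketch only (assumed shape, not the code added in this change): walk `p` and the
// candidate postings together and report the indexes of candidates that intersect p.
func findIntersectingPostingsSketch(p Postings, candidates []Postings) ([]int, error) {
	h := make(postingsWithIndexHeap, 0, len(candidates))
	for idx, it := range candidates {
		switch {
		case it.Next():
			// Candidate has at least one value; track it in the heap.
			h = append(h, postingsWithIndex{index: idx, p: it})
		case it.Err() != nil:
			return nil, it.Err()
		}
	}
	if h.empty() {
		return nil, nil
	}
	heap.Init(&h)

	var indexes []int
	for !h.empty() {
		// Advance p to at least the smallest candidate value still in play.
		if !p.Seek(h.at()) {
			return indexes, p.Err()
		}
		if p.At() == h.at() {
			// The root candidate intersects p: record its index and retire it.
			indexes = append(indexes, h.popIndex())
			continue
		}
		// Otherwise advance the root candidate one step; next() surfaces Postings errors.
		if err := h.next(); err != nil {
			return nil, err
		}
	}
	return indexes, nil
}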
@@ -14,6 +14,8 @@
package index

import (
	"container/heap"
	"context"
	"encoding/binary"
	"fmt"
	"math/rand"

@@ -21,6 +23,7 @@ import (
	"strconv"
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/model/labels"
@@ -929,3 +932,142 @@ func TestMemPostings_Delete(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded)
}

func TestFindIntersectingPostings(t *testing.T) {
	t.Run("multiple intersections", func(t *testing.T) {
		p := NewListPostings([]storage.SeriesRef{10, 15, 20, 25, 30, 35, 40, 45, 50})
		candidates := []Postings{
			0: NewListPostings([]storage.SeriesRef{7, 13, 14, 27}), // Does not intersect.
			1: NewListPostings([]storage.SeriesRef{10, 20}),        // Does intersect.
			2: NewListPostings([]storage.SeriesRef{29, 30, 31}),    // Does intersect.
			3: NewListPostings([]storage.SeriesRef{29, 30, 31}),    // Does intersect (same again).
			4: NewListPostings([]storage.SeriesRef{60}),            // Does not intersect.
			5: NewListPostings([]storage.SeriesRef{45}),            // Does intersect.
			6: EmptyPostings(),                                     // Does not intersect.
		}

		indexes, err := FindIntersectingPostings(p, candidates)
		require.NoError(t, err)
		sort.Ints(indexes)
		require.Equal(t, []int{1, 2, 3, 5}, indexes)
	})

	t.Run("no intersections", func(t *testing.T) {
		p := NewListPostings([]storage.SeriesRef{10, 15, 20, 25, 30, 35, 40, 45, 50})
		candidates := []Postings{
			0: NewListPostings([]storage.SeriesRef{7, 13, 14, 27}), // Does not intersect.
			1: NewListPostings([]storage.SeriesRef{60}),            // Does not intersect.
			2: EmptyPostings(),                                     // Does not intersect.
		}

		indexes, err := FindIntersectingPostings(p, candidates)
		require.NoError(t, err)
		require.Empty(t, indexes)
	})

	t.Run("p is ErrPostings", func(t *testing.T) {
		p := ErrPostings(context.Canceled)
		candidates := []Postings{NewListPostings([]storage.SeriesRef{1})}
		_, err := FindIntersectingPostings(p, candidates)
		require.Error(t, err)
	})

	t.Run("one of the candidates is ErrPostings", func(t *testing.T) {
		p := NewListPostings([]storage.SeriesRef{1})
		candidates := []Postings{
			NewListPostings([]storage.SeriesRef{1}),
			ErrPostings(context.Canceled),
		}
		_, err := FindIntersectingPostings(p, candidates)
		require.Error(t, err)
	})

	t.Run("one of the candidates fails on nth call", func(t *testing.T) {
		p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50, 60, 70})
		candidates := []Postings{
			NewListPostings([]storage.SeriesRef{7, 13, 14, 27}),
			&postingsFailingAfterNthCall{2, NewListPostings([]storage.SeriesRef{29, 30, 31, 40})},
		}
		_, err := FindIntersectingPostings(p, candidates)
		require.Error(t, err)
	})

	t.Run("p fails on the nth call", func(t *testing.T) {
		p := &postingsFailingAfterNthCall{2, NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50, 60, 70})}
		candidates := []Postings{
			NewListPostings([]storage.SeriesRef{7, 13, 14, 27}),
			NewListPostings([]storage.SeriesRef{29, 30, 31, 40}),
		}
		_, err := FindIntersectingPostings(p, candidates)
		require.Error(t, err)
	})
}

type postingsFailingAfterNthCall struct {
	ttl int
	Postings
}

func (p *postingsFailingAfterNthCall) Seek(v storage.SeriesRef) bool {
	p.ttl--
	if p.ttl <= 0 {
		return false
	}
	return p.Postings.Seek(v)
}

func (p *postingsFailingAfterNthCall) Next() bool {
	p.ttl--
	if p.ttl <= 0 {
		return false
	}
	return p.Postings.Next()
}

func (p *postingsFailingAfterNthCall) Err() error {
	if p.ttl <= 0 {
		return errors.New("ttl exceeded")
	}
	return p.Postings.Err()
}

func TestPostingsWithIndexHeap(t *testing.T) {
	t.Run("iterate", func(t *testing.T) {
		h := postingsWithIndexHeap{
			{index: 0, p: NewListPostings([]storage.SeriesRef{10, 20, 30})},
			{index: 1, p: NewListPostings([]storage.SeriesRef{1, 5})},
			{index: 2, p: NewListPostings([]storage.SeriesRef{25, 50})},
		}
		for _, node := range h {
			node.p.Next()
		}
		heap.Init(&h)

		for _, expected := range []storage.SeriesRef{1, 5, 10, 20, 25, 30, 50} {
			require.Equal(t, expected, h.at())
			require.NoError(t, h.next())
		}
		require.True(t, h.empty())
	})

	t.Run("pop", func(t *testing.T) {
		h := postingsWithIndexHeap{
			{index: 0, p: NewListPostings([]storage.SeriesRef{10, 20, 30})},
			{index: 1, p: NewListPostings([]storage.SeriesRef{1, 5})},
			{index: 2, p: NewListPostings([]storage.SeriesRef{25, 50})},
		}
		for _, node := range h {
			node.p.Next()
		}
		heap.Init(&h)

		for _, expected := range []storage.SeriesRef{1, 5, 10, 20} {
			require.Equal(t, expected, h.at())
			require.NoError(t, h.next())
		}
		require.Equal(t, storage.SeriesRef(25), h.at())
		node := heap.Pop(&h).(postingsWithIndex)
		require.Equal(t, 2, node.index)
		require.Equal(t, storage.SeriesRef(25), node.p.At())
	})
}
@@ -375,38 +375,30 @@ func inversePostingsForMatcher(ix IndexReader, m *labels.Matcher) (index.Posting
}

func labelValuesWithMatchers(r IndexReader, name string, matchers ...*labels.Matcher) ([]string, error) {
	// We're only interested in metrics which have the label <name>.
	requireLabel, err := labels.NewMatcher(labels.MatchNotEqual, name, "")
	p, err := PostingsForMatchers(r, matchers...)
	if err != nil {
		return nil, errors.Wrapf(err, "Failed to instantiate label matcher")
		return nil, errors.Wrap(err, "fetching postings for matchers")
	}

	var p index.Postings
	p, err = PostingsForMatchers(r, append(matchers, requireLabel)...)
	allValues, err := r.LabelValues(name)
	if err != nil {
		return nil, err
		return nil, errors.Wrapf(err, "fetching values of label %s", name)
	}

	dedupe := map[string]interface{}{}
	for p.Next() {
		v, err := r.LabelValueFor(p.At(), name)
	valuesPostings := make([]index.Postings, len(allValues))
	for i, value := range allValues {
		valuesPostings[i], err = r.Postings(name, value)
		if err != nil {
			if err == storage.ErrNotFound {
				continue
			return nil, errors.Wrapf(err, "fetching postings for %s=%q", name, value)
			}
		}
	}
	indexes, err := index.FindIntersectingPostings(p, valuesPostings)
	if err != nil {
		return nil, errors.Wrap(err, "intersecting postings")
	}

			return nil, err
		}
		dedupe[v] = nil
	}

	if err = p.Err(); err != nil {
		return nil, err
	}

	values := make([]string, 0, len(dedupe))
	for value := range dedupe {
		values = append(values, value)
	values := make([]string, 0, len(indexes))
	for _, idx := range indexes {
		values = append(values, allValues[idx])
	}

	return values, nil
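As an aside (not part of the diff): a minimal caller of the rewritten helper might look like the sketch below. The IndexReader value `ir` and the matcher are placeholders chosen for illustration, not values taken from this change.

// Sketch only: list the values of label "job" seen on series matching instance="demo:9090".
// `ir` is an assumed IndexReader (e.g. obtained from a Head or Block index).
m := labels.MustNewMatcher(labels.MatchEqual, "instance", "demo:9090")
jobValues, err := labelValuesWithMatchers(ir, "job", m)
if err != nil {
	// handle the wrapped error from PostingsForMatchers / FindIntersectingPostings
}
_ = jobValues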
@@ -31,7 +31,7 @@ const (
	postingsBenchSuffix = "aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd"
)

func BenchmarkPostingsForMatchers(b *testing.B) {
func BenchmarkQuerier(b *testing.B) {
	chunkDir, err := ioutil.TempDir("", "chunk_dir")
	require.NoError(b, err)
	defer func() {

@@ -66,8 +66,13 @@ func BenchmarkPostingsForMatchers(b *testing.B) {
	ir, err := h.Index()
	require.NoError(b, err)
	b.Run("Head", func(b *testing.B) {
		b.Run("PostingsForMatchers", func(b *testing.B) {
			benchmarkPostingsForMatchers(b, ir)
		})
		b.Run("labelValuesWithMatchers", func(b *testing.B) {
			benchmarkLabelValuesWithMatchers(b, ir)
		})
	})

	tmpdir, err := ioutil.TempDir("", "test_benchpostingsformatchers")
	require.NoError(b, err)

@@ -85,8 +90,13 @@ func BenchmarkPostingsForMatchers(b *testing.B) {
	require.NoError(b, err)
	defer ir.Close()
	b.Run("Block", func(b *testing.B) {
		b.Run("PostingsForMatchers", func(b *testing.B) {
			benchmarkPostingsForMatchers(b, ir)
		})
		b.Run("labelValuesWithMatchers", func(b *testing.B) {
			benchmarkLabelValuesWithMatchers(b, ir)
		})
	})
}

func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {

@@ -103,7 +113,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
	i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.+$")
	iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")
	iNotEmpty := labels.MustNewMatcher(labels.MatchNotEqual, "i", "")
	iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "n", "2"+postingsBenchSuffix)
	iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "i", "2"+postingsBenchSuffix)
	iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^2.*$")
	iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*2.*$")

@@ -143,6 +153,38 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
	}
}

func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
	i1 := labels.MustNewMatcher(labels.MatchEqual, "i", "1")
	iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")
	jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")
	n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)
	nPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$")

	cases := []struct {
		name      string
		labelName string
		matchers  []*labels.Matcher
	}{
		// i has 100k values.
		{`i with n="1"`, "i", []*labels.Matcher{n1}},
		{`i with n="^.+$"`, "i", []*labels.Matcher{nPlus}},
		{`i with n="1",j!="foo"`, "i", []*labels.Matcher{n1, jNotFoo}},
		{`i with n="1",i=~"^.*$",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}},
		// n has 10 values.
		{`n with j!="foo"`, "n", []*labels.Matcher{jNotFoo}},
		{`n with i="1"`, "n", []*labels.Matcher{i1}},
	}

	for _, c := range cases {
		b.Run(c.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				_, err := labelValuesWithMatchers(ir, c.labelName, c.matchers...)
				require.NoError(b, err)
			}
		})
	}
}

func BenchmarkQuerierSelect(b *testing.B) {
	chunkDir, err := ioutil.TempDir("", "chunk_dir")
	require.NoError(b, err)
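Side note (not part of the diff): these benchmarks can be run in isolation with the standard Go tooling, for example `go test -run '^$' -bench 'BenchmarkQuerier' ./tsdb/`.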
@@ -39,6 +39,7 @@ func New(t testutil.T) *TestStorage {
	opts := tsdb.DefaultOptions()
	opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
	opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
	opts.RetentionDuration = 0
	db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats())
	require.NoError(t, err, "unexpected error while opening test storage")
	reg := prometheus.NewRegistry()
@@ -378,11 +378,6 @@ func (api *API) query(r *http.Request) (result apiFuncResult) {
	}

	qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, r.FormValue("query"), ts)
	if err == promql.ErrValidationAtModifierDisabled {
		err = errors.New("@ modifier is disabled, use --enable-feature=promql-at-modifier to enable it")
	} else if err == promql.ErrValidationNegativeOffsetDisabled {
		err = errors.New("negative offset is disabled, use --enable-feature=promql-negative-offset to enable it")
	}
	if err != nil {
		return invalidParamError(err, "query")
	}

@@ -458,11 +453,6 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) {
	}

	qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, r.FormValue("query"), start, end, step)
	if err == promql.ErrValidationAtModifierDisabled {
		err = errors.New("@ modifier is disabled, use --enable-feature=promql-at-modifier to enable it")
	} else if err == promql.ErrValidationNegativeOffsetDisabled {
		err = errors.New("negative offset is disabled, use --enable-feature=promql-negative-offset to enable it")
	}
	if err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

@@ -1164,6 +1154,7 @@ type RuleGroup struct {
	// same array.
	Rules          []Rule    `json:"rules"`
	Interval       float64   `json:"interval"`
	Limit          int       `json:"limit"`
	EvaluationTime float64   `json:"evaluationTime"`
	LastEvaluation time.Time `json:"lastEvaluation"`
}

@@ -1216,6 +1207,7 @@ func (api *API) rules(r *http.Request) apiFuncResult {
	Name:           grp.Name(),
	File:           grp.File(),
	Interval:       grp.Interval().Seconds(),
	Limit:          grp.Limit(),
	Rules:          []Rule{},
	EvaluationTime: grp.GetEvaluationTime().Seconds(),
	LastEvaluation: grp.GetLastEvaluation(),

@@ -1470,6 +1470,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
	Name:     "grp",
	File:     "/path/to/file",
	Interval: 1,
	Limit:    0,
	Rules: []Rule{
		AlertingRule{
			State: "inactive",

@@ -1516,6 +1517,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
	Name:     "grp",
	File:     "/path/to/file",
	Interval: 1,
	Limit:    0,
	Rules: []Rule{
		AlertingRule{
			State: "inactive",

@@ -1555,6 +1557,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
	Name:     "grp",
	File:     "/path/to/file",
	Interval: 1,
	Limit:    0,
	Rules: []Rule{
		RecordingRule{
			Name: "recording-rule-1",
@ -6,7 +6,7 @@
|
|||
"module": "dist/esm/index.js",
|
||||
"scripts": {
|
||||
"build": "npm run build:grammar && npm run build:lib",
|
||||
"build:grammar": "lezer-generator src/grammar/promql.grammar -o src/grammar/parser",
|
||||
"build:grammar": "npx lezer-generator src/grammar/promql.grammar -o src/grammar/parser",
|
||||
"build:lib": "bash ./build.sh",
|
||||
"test": "npm run build:grammar && ts-mocha -p tsconfig.json ./**/*.test.ts",
|
||||
"test:coverage": "npm run build:grammar && nyc ts-mocha -p ./tsconfig.json ./**/*.test.ts",
|
||||
|
@ -48,8 +48,8 @@
|
|||
"@types/lru-cache": "^5.1.1",
|
||||
"@types/mocha": "^9.0.0",
|
||||
"@types/node": "^16.11.12",
|
||||
"@typescript-eslint/eslint-plugin": "^5.5.0",
|
||||
"@typescript-eslint/parser": "^5.5.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.8.0",
|
||||
"@typescript-eslint/parser": "^5.8.0",
|
||||
"chai": "^4.2.0",
|
||||
"codecov": "^3.8.3",
|
||||
"eslint": "^8.4.1",
|
||||
|
|
|
@ -34,8 +34,8 @@
|
|||
"@types/lru-cache": "^5.1.1",
|
||||
"@types/mocha": "^9.0.0",
|
||||
"@types/node": "^16.11.12",
|
||||
"@typescript-eslint/eslint-plugin": "^5.5.0",
|
||||
"@typescript-eslint/parser": "^5.5.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.8.0",
|
||||
"@typescript-eslint/parser": "^5.8.0",
|
||||
"chai": "^4.2.0",
|
||||
"codecov": "^3.8.3",
|
||||
"eslint": "^8.4.1",
|
||||
|
@ -101,13 +101,13 @@
|
|||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/eslint-plugin": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.5.0.tgz",
|
||||
"integrity": "sha512-4bV6fulqbuaO9UMXU0Ia0o6z6if+kmMRW8rMRyfqXj/eGrZZRGedS4n0adeGNnjr8LKAM495hrQ7Tea52UWmQA==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.8.0.tgz",
|
||||
"integrity": "sha512-spu1UW7QuBn0nJ6+psnfCc3iVoQAifjKORgBngKOmC8U/1tbe2YJMzYQqDGYB4JCss7L8+RM2kKLb1B1Aw9BNA==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/experimental-utils": "5.5.0",
|
||||
"@typescript-eslint/scope-manager": "5.5.0",
|
||||
"@typescript-eslint/experimental-utils": "5.8.0",
|
||||
"@typescript-eslint/scope-manager": "5.8.0",
|
||||
"debug": "^4.3.2",
|
||||
"functional-red-black-tree": "^1.0.1",
|
||||
"ignore": "^5.1.8",
|
||||
|
@ -141,15 +141,15 @@
|
|||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/experimental-utils": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.5.0.tgz",
|
||||
"integrity": "sha512-kjWeeVU+4lQ1SLYErRKV5yDXbWDPkpbzTUUlfAUifPYvpX0qZlrcCZ96/6oWxt3QxtK5WVhXz+KsnwW9cIW+3A==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.8.0.tgz",
|
||||
"integrity": "sha512-KN5FvNH71bhZ8fKtL+lhW7bjm7cxs1nt+hrDZWIqb6ViCffQcWyLunGrgvISgkRojIDcXIsH+xlFfI4RCDA0xA==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@types/json-schema": "^7.0.9",
|
||||
"@typescript-eslint/scope-manager": "5.5.0",
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/typescript-estree": "5.5.0",
|
||||
"@typescript-eslint/scope-manager": "5.8.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/typescript-estree": "5.8.0",
|
||||
"eslint-scope": "^5.1.1",
|
||||
"eslint-utils": "^3.0.0"
|
||||
},
|
||||
|
@ -161,18 +161,18 @@
|
|||
"url": "https://opencollective.com/typescript-eslint"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"eslint": "*"
|
||||
"eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
|
||||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/parser": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.5.0.tgz",
|
||||
"integrity": "sha512-JsXBU+kgQOAgzUn2jPrLA+Rd0Y1dswOlX3hp8MuRO1hQDs6xgHtbCXEiAu7bz5hyVURxbXcA2draasMbNqrhmg==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.8.0.tgz",
|
||||
"integrity": "sha512-Gleacp/ZhRtJRYs5/T8KQR3pAQjQI89Dn/k+OzyCKOsLiZH2/Vh60cFBTnFsHNI6WAD+lNUo/xGZ4NeA5u0Ipw==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/scope-manager": "5.5.0",
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/typescript-estree": "5.5.0",
|
||||
"@typescript-eslint/scope-manager": "5.8.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/typescript-estree": "5.8.0",
|
||||
"debug": "^4.3.2"
|
||||
},
|
||||
"engines": {
|
||||
|
@ -192,13 +192,13 @@
|
|||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/scope-manager": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.5.0.tgz",
|
||||
"integrity": "sha512-0/r656RmRLo7CbN4Mdd+xZyPJ/fPCKhYdU6mnZx+8msAD8nJSP8EyCFkzbd6vNVZzZvWlMYrSNekqGrCBqFQhg==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.8.0.tgz",
|
||||
"integrity": "sha512-x82CYJsLOjPCDuFFEbS6e7K1QEWj7u5Wk1alw8A+gnJiYwNnDJk0ib6PCegbaPMjrfBvFKa7SxE3EOnnIQz2Gg==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/visitor-keys": "5.5.0"
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/visitor-keys": "5.8.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
|
||||
|
@ -209,9 +209,9 @@
|
|||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/types": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.5.0.tgz",
|
||||
"integrity": "sha512-OaYTqkW3GnuHxqsxxJ6KypIKd5Uw7bFiQJZRyNi1jbMJnK3Hc/DR4KwB6KJj6PBRkJJoaNwzMNv9vtTk87JhOg==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.8.0.tgz",
|
||||
"integrity": "sha512-LdCYOqeqZWqCMOmwFnum6YfW9F3nKuxJiR84CdIRN5nfHJ7gyvGpXWqL/AaW0k3Po0+wm93ARAsOdzlZDPCcXg==",
|
||||
"dev": true,
|
||||
"engines": {
|
||||
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
|
||||
|
@ -222,13 +222,13 @@
|
|||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/typescript-estree": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.5.0.tgz",
|
||||
"integrity": "sha512-pVn8btYUiYrjonhMAO0yG8lm7RApzy2L4RC7Td/mC/qFkyf6vRbGyZozoA94+w6D2Y2GRqpMoCWcwx/EUOzyoQ==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.8.0.tgz",
|
||||
"integrity": "sha512-srfeZ3URdEcUsSLbkOFqS7WoxOqn8JNil2NSLO9O+I2/Uyc85+UlfpEvQHIpj5dVts7KKOZnftoJD/Fdv0L7nQ==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/visitor-keys": "5.5.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/visitor-keys": "5.8.0",
|
||||
"debug": "^4.3.2",
|
||||
"globby": "^11.0.4",
|
||||
"is-glob": "^4.0.3",
|
||||
|
@ -249,12 +249,12 @@
|
|||
}
|
||||
},
|
||||
"module/codemirror-promql/node_modules/@typescript-eslint/visitor-keys": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.5.0.tgz",
|
||||
"integrity": "sha512-4GzJ1kRtsWzHhdM40tv0ZKHNSbkDhF0Woi/TDwVJX6UICwJItvP7ZTXbjTkCdrors7ww0sYe0t+cIKDAJwZ7Kw==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.8.0.tgz",
|
||||
"integrity": "sha512-+HDIGOEMnqbxdAHegxvnOqESUH6RWFRR2b8qxP1W9CZnnYh4Usz6MBL+2KMAgPk/P0o9c1HqnYtwzVH6GTIqug==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"eslint-visitor-keys": "^3.0.0"
|
||||
},
|
||||
"engines": {
|
||||
|
@ -1592,6 +1592,19 @@
|
|||
"@lezer/common": "^0.15.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@nexucis/fuzzy": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.3.0.tgz",
|
||||
"integrity": "sha512-Z1+ADKY0fxdBE28REraWhUCNy+Bp5UmpK3Tc/5wdCDpY+6fXh8l2csMtbPGaqEBsyGLxJz9wUYGCf+CW9unyvQ=="
|
||||
},
|
||||
"node_modules/@nexucis/kvsearch": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.3.0.tgz",
|
||||
"integrity": "sha512-tHIH6W/mRUZZ0ZQyRbgp2uhat+2O1c1jX1EC6NHv7/8OIeHx1HBZ5ZZb0KSUVWl4jkNzYw6AO39OoTELtrjaQw==",
|
||||
"dependencies": {
|
||||
"@nexucis/fuzzy": "^0.3.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@nodelib/fs.scandir": {
|
||||
"version": "2.1.5",
|
||||
"dev": true,
|
||||
|
@ -1857,9 +1870,10 @@
|
|||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/sanitize-html": {
|
||||
"version": "2.5.0",
|
||||
"version": "2.6.1",
|
||||
"resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.6.1.tgz",
|
||||
"integrity": "sha512-+JLlZdJkIHdgvlFnMorJeLv5WIORNcI+jHsAoPBWMnhGx5Rbz/D1QN5D4qvmoA7fooDlEVy6zlioWSgqbU0KeQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"htmlparser2": "^6.0.0"
|
||||
}
|
||||
|
@ -4132,12 +4146,6 @@
|
|||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/i": {
|
||||
"version": "0.3.7",
|
||||
"engines": {
|
||||
"node": ">=0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/ignore": {
|
||||
"version": "4.0.6",
|
||||
"dev": true,
|
||||
|
@ -5957,6 +5965,17 @@
|
|||
"react": "17.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/react-infinite-scroll-component": {
|
||||
"version": "6.1.0",
|
||||
"resolved": "https://registry.npmjs.org/react-infinite-scroll-component/-/react-infinite-scroll-component-6.1.0.tgz",
|
||||
"integrity": "sha512-SQu5nCqy8DxQWpnUVLx7V7b7LcA37aM7tvoWjTLZp1dk6EJibM5/4EJKzOnl07/BsM1Y40sKLuqjCwwH/xV0TQ==",
|
||||
"dependencies": {
|
||||
"throttle-debounce": "^2.1.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"react": ">=16.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/react-is": {
|
||||
"version": "17.0.2",
|
||||
"license": "MIT"
|
||||
|
@ -6608,6 +6627,14 @@
|
|||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/throttle-debounce": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-2.3.0.tgz",
|
||||
"integrity": "sha512-H7oLPV0P7+jgvrk+6mwwwBDmxTaxnu9HMXmloNLXwnNO0ZxZ31Orah2n8lU1eMPvsaowP2CX+USCgyovXfdOFQ==",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/to-fast-properties": {
|
||||
"version": "2.0.0",
|
||||
"dev": true,
|
||||
|
@ -7245,11 +7272,11 @@
|
|||
"@fortawesome/free-solid-svg-icons": "^5.7.2",
|
||||
"@fortawesome/react-fontawesome": "^0.1.16",
|
||||
"@nexucis/fuzzy": "^0.3.0",
|
||||
"@nexucis/kvsearch": "^0.3.0",
|
||||
"bootstrap": "^4.6.1",
|
||||
"codemirror-promql": "0.19.0",
|
||||
"css.escape": "^1.5.1",
|
||||
"downshift": "^6.1.7",
|
||||
"i": "^0.3.7",
|
||||
"jquery": "^3.5.1",
|
||||
"jquery.flot.tooltip": "^0.9.0",
|
||||
"moment": "^2.24.0",
|
||||
|
@ -7258,6 +7285,7 @@
|
|||
"react": "^17.0.2",
|
||||
"react-copy-to-clipboard": "^5.0.4",
|
||||
"react-dom": "^17.0.2",
|
||||
"react-infinite-scroll-component": "^6.1.0",
|
||||
"react-resize-detector": "^6.7.6",
|
||||
"react-router-dom": "^5.2.1",
|
||||
"react-test-renderer": "^17.0.2",
|
||||
|
@ -7279,7 +7307,7 @@
|
|||
"@types/react-dom": "^17.0.11",
|
||||
"@types/react-resize-detector": "^6.1.0",
|
||||
"@types/react-router-dom": "^5.3.2",
|
||||
"@types/sanitize-html": "^2.5.0",
|
||||
"@types/sanitize-html": "^2.6.1",
|
||||
"@types/sinon": "^10.0.6",
|
||||
"@wojtekmaj/enzyme-adapter-react-17": "^0.6.5",
|
||||
"enzyme": "^3.11.0",
|
||||
|
@ -9926,10 +9954,6 @@
|
|||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"react-app/node_modules/@nexucis/fuzzy": {
|
||||
"version": "0.3.0",
|
||||
"license": "MIT"
|
||||
},
|
||||
"react-app/node_modules/@npmcli/fs": {
|
||||
"version": "1.0.0",
|
||||
"dev": true,
|
||||
|
@ -27666,6 +27690,19 @@
|
|||
"@lezer/common": "^0.15.0"
|
||||
}
|
||||
},
|
||||
"@nexucis/fuzzy": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@nexucis/fuzzy/-/fuzzy-0.3.0.tgz",
|
||||
"integrity": "sha512-Z1+ADKY0fxdBE28REraWhUCNy+Bp5UmpK3Tc/5wdCDpY+6fXh8l2csMtbPGaqEBsyGLxJz9wUYGCf+CW9unyvQ=="
|
||||
},
|
||||
"@nexucis/kvsearch": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/@nexucis/kvsearch/-/kvsearch-0.3.0.tgz",
|
||||
"integrity": "sha512-tHIH6W/mRUZZ0ZQyRbgp2uhat+2O1c1jX1EC6NHv7/8OIeHx1HBZ5ZZb0KSUVWl4jkNzYw6AO39OoTELtrjaQw==",
|
||||
"requires": {
|
||||
"@nexucis/fuzzy": "^0.3.0"
|
||||
}
|
||||
},
|
||||
"@nodelib/fs.scandir": {
|
||||
"version": "2.1.5",
|
||||
"dev": true,
|
||||
|
@ -27897,7 +27934,9 @@
|
|||
"version": "0.1.6"
|
||||
},
|
||||
"@types/sanitize-html": {
|
||||
"version": "2.5.0",
|
||||
"version": "2.6.1",
|
||||
"resolved": "https://registry.npmjs.org/@types/sanitize-html/-/sanitize-html-2.6.1.tgz",
|
||||
"integrity": "sha512-+JLlZdJkIHdgvlFnMorJeLv5WIORNcI+jHsAoPBWMnhGx5Rbz/D1QN5D4qvmoA7fooDlEVy6zlioWSgqbU0KeQ==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"htmlparser2": "^6.0.0"
|
||||
|
@ -28442,8 +28481,8 @@
|
|||
"@types/lru-cache": "^5.1.1",
|
||||
"@types/mocha": "^9.0.0",
|
||||
"@types/node": "^16.11.12",
|
||||
"@typescript-eslint/eslint-plugin": "^5.5.0",
|
||||
"@typescript-eslint/parser": "^5.5.0",
|
||||
"@typescript-eslint/eslint-plugin": "^5.8.0",
|
||||
"@typescript-eslint/parser": "^5.8.0",
|
||||
"chai": "^4.2.0",
|
||||
"codecov": "^3.8.3",
|
||||
"eslint": "^8.4.1",
|
||||
|
@ -28492,13 +28531,13 @@
|
|||
}
|
||||
},
|
||||
"@typescript-eslint/eslint-plugin": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.5.0.tgz",
|
||||
"integrity": "sha512-4bV6fulqbuaO9UMXU0Ia0o6z6if+kmMRW8rMRyfqXj/eGrZZRGedS4n0adeGNnjr8LKAM495hrQ7Tea52UWmQA==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.8.0.tgz",
|
||||
"integrity": "sha512-spu1UW7QuBn0nJ6+psnfCc3iVoQAifjKORgBngKOmC8U/1tbe2YJMzYQqDGYB4JCss7L8+RM2kKLb1B1Aw9BNA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@typescript-eslint/experimental-utils": "5.5.0",
|
||||
"@typescript-eslint/scope-manager": "5.5.0",
|
||||
"@typescript-eslint/experimental-utils": "5.8.0",
|
||||
"@typescript-eslint/scope-manager": "5.8.0",
|
||||
"debug": "^4.3.2",
|
||||
"functional-red-black-tree": "^1.0.1",
|
||||
"ignore": "^5.1.8",
|
||||
|
@ -28514,55 +28553,55 @@
|
|||
}
|
||||
},
|
||||
"@typescript-eslint/experimental-utils": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.5.0.tgz",
|
||||
"integrity": "sha512-kjWeeVU+4lQ1SLYErRKV5yDXbWDPkpbzTUUlfAUifPYvpX0qZlrcCZ96/6oWxt3QxtK5WVhXz+KsnwW9cIW+3A==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.8.0.tgz",
|
||||
"integrity": "sha512-KN5FvNH71bhZ8fKtL+lhW7bjm7cxs1nt+hrDZWIqb6ViCffQcWyLunGrgvISgkRojIDcXIsH+xlFfI4RCDA0xA==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@types/json-schema": "^7.0.9",
|
||||
"@typescript-eslint/scope-manager": "5.5.0",
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/typescript-estree": "5.5.0",
|
||||
"@typescript-eslint/scope-manager": "5.8.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/typescript-estree": "5.8.0",
|
||||
"eslint-scope": "^5.1.1",
|
||||
"eslint-utils": "^3.0.0"
|
||||
}
|
||||
},
|
||||
"@typescript-eslint/parser": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.5.0.tgz",
|
||||
"integrity": "sha512-JsXBU+kgQOAgzUn2jPrLA+Rd0Y1dswOlX3hp8MuRO1hQDs6xgHtbCXEiAu7bz5hyVURxbXcA2draasMbNqrhmg==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.8.0.tgz",
|
||||
"integrity": "sha512-Gleacp/ZhRtJRYs5/T8KQR3pAQjQI89Dn/k+OzyCKOsLiZH2/Vh60cFBTnFsHNI6WAD+lNUo/xGZ4NeA5u0Ipw==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@typescript-eslint/scope-manager": "5.5.0",
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/typescript-estree": "5.5.0",
|
||||
"@typescript-eslint/scope-manager": "5.8.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/typescript-estree": "5.8.0",
|
||||
"debug": "^4.3.2"
|
||||
}
|
||||
},
|
||||
"@typescript-eslint/scope-manager": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.5.0.tgz",
|
||||
"integrity": "sha512-0/r656RmRLo7CbN4Mdd+xZyPJ/fPCKhYdU6mnZx+8msAD8nJSP8EyCFkzbd6vNVZzZvWlMYrSNekqGrCBqFQhg==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.8.0.tgz",
|
||||
"integrity": "sha512-x82CYJsLOjPCDuFFEbS6e7K1QEWj7u5Wk1alw8A+gnJiYwNnDJk0ib6PCegbaPMjrfBvFKa7SxE3EOnnIQz2Gg==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/visitor-keys": "5.5.0"
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/visitor-keys": "5.8.0"
|
||||
}
|
||||
},
|
||||
"@typescript-eslint/types": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.5.0.tgz",
|
||||
"integrity": "sha512-OaYTqkW3GnuHxqsxxJ6KypIKd5Uw7bFiQJZRyNi1jbMJnK3Hc/DR4KwB6KJj6PBRkJJoaNwzMNv9vtTk87JhOg==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.8.0.tgz",
|
||||
"integrity": "sha512-LdCYOqeqZWqCMOmwFnum6YfW9F3nKuxJiR84CdIRN5nfHJ7gyvGpXWqL/AaW0k3Po0+wm93ARAsOdzlZDPCcXg==",
|
||||
"dev": true
|
||||
},
|
||||
"@typescript-eslint/typescript-estree": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.5.0.tgz",
|
||||
"integrity": "sha512-pVn8btYUiYrjonhMAO0yG8lm7RApzy2L4RC7Td/mC/qFkyf6vRbGyZozoA94+w6D2Y2GRqpMoCWcwx/EUOzyoQ==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.8.0.tgz",
|
||||
"integrity": "sha512-srfeZ3URdEcUsSLbkOFqS7WoxOqn8JNil2NSLO9O+I2/Uyc85+UlfpEvQHIpj5dVts7KKOZnftoJD/Fdv0L7nQ==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/visitor-keys": "5.5.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"@typescript-eslint/visitor-keys": "5.8.0",
|
||||
"debug": "^4.3.2",
|
||||
"globby": "^11.0.4",
|
||||
"is-glob": "^4.0.3",
|
||||
|
@ -28571,12 +28610,12 @@
|
|||
}
|
||||
},
|
||||
"@typescript-eslint/visitor-keys": {
|
||||
"version": "5.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.5.0.tgz",
|
||||
"integrity": "sha512-4GzJ1kRtsWzHhdM40tv0ZKHNSbkDhF0Woi/TDwVJX6UICwJItvP7ZTXbjTkCdrors7ww0sYe0t+cIKDAJwZ7Kw==",
|
||||
"version": "5.8.0",
|
||||
"resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.8.0.tgz",
|
||||
"integrity": "sha512-+HDIGOEMnqbxdAHegxvnOqESUH6RWFRR2b8qxP1W9CZnnYh4Usz6MBL+2KMAgPk/P0o9c1HqnYtwzVH6GTIqug==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@typescript-eslint/types": "5.5.0",
|
||||
"@typescript-eslint/types": "5.8.0",
|
||||
"eslint-visitor-keys": "^3.0.0"
|
||||
}
|
||||
},
|
||||
|
@ -29686,6 +29725,7 @@
|
|||
"@fortawesome/free-solid-svg-icons": "^5.7.2",
|
||||
"@fortawesome/react-fontawesome": "^0.1.16",
|
||||
"@nexucis/fuzzy": "^0.3.0",
|
||||
"@nexucis/kvsearch": "^0.3.0",
|
||||
"@testing-library/react-hooks": "^7.0.1",
|
||||
"@types/enzyme": "^3.10.10",
|
||||
"@types/flot": "0.0.32",
|
||||
|
@ -29697,7 +29737,7 @@
|
|||
"@types/react-dom": "^17.0.11",
|
||||
"@types/react-resize-detector": "^6.1.0",
|
||||
"@types/react-router-dom": "^5.3.2",
|
||||
"@types/sanitize-html": "^2.5.0",
|
||||
"@types/sanitize-html": "^2.6.1",
|
||||
"@types/sinon": "^10.0.6",
|
||||
"@wojtekmaj/enzyme-adapter-react-17": "^0.6.5",
|
||||
"bootstrap": "^4.6.1",
|
||||
|
@ -29710,7 +29750,6 @@
|
|||
"eslint-config-react-app": "^6.0.0",
|
||||
"eslint-plugin-prettier": "^4.0.0",
|
||||
"fsevents": "^2.3.2",
|
||||
"i": "^0.3.7",
|
||||
"jest-canvas-mock": "^2.3.1",
|
||||
"jest-fetch-mock": "^3.0.3",
|
||||
"jquery": "^3.5.1",
|
||||
|
@ -29723,6 +29762,7 @@
|
|||
"react": "^17.0.2",
|
||||
"react-copy-to-clipboard": "^5.0.4",
|
||||
"react-dom": "^17.0.2",
|
||||
"react-infinite-scroll-component": "^6.1.0",
|
||||
"react-resize-detector": "^6.7.6",
|
||||
"react-router-dom": "^5.2.1",
|
||||
"react-scripts": "4.0.3",
|
||||
|
@ -31400,9 +31440,6 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"@nexucis/fuzzy": {
|
||||
"version": "0.3.0"
|
||||
},
|
||||
"@npmcli/fs": {
|
||||
"version": "1.0.0",
|
||||
"dev": true,
|
||||
|
@ -43292,9 +43329,6 @@
|
|||
"debug": "4"
|
||||
}
|
||||
},
|
||||
"i": {
|
||||
"version": "0.3.7"
|
||||
},
|
||||
"ignore": {
|
||||
"version": "4.0.6",
|
||||
"dev": true
|
||||
|
@ -44498,6 +44532,14 @@
|
|||
"scheduler": "^0.20.2"
|
||||
}
|
||||
},
|
||||
"react-infinite-scroll-component": {
|
||||
"version": "6.1.0",
|
||||
"resolved": "https://registry.npmjs.org/react-infinite-scroll-component/-/react-infinite-scroll-component-6.1.0.tgz",
|
||||
"integrity": "sha512-SQu5nCqy8DxQWpnUVLx7V7b7LcA37aM7tvoWjTLZp1dk6EJibM5/4EJKzOnl07/BsM1Y40sKLuqjCwwH/xV0TQ==",
|
||||
"requires": {
|
||||
"throttle-debounce": "^2.1.0"
|
||||
}
|
||||
},
|
||||
"react-is": {
|
||||
"version": "17.0.2"
|
||||
},
|
||||
|
@ -44945,6 +44987,11 @@
|
|||
"version": "0.2.0",
|
||||
"dev": true
|
||||
},
|
||||
"throttle-debounce": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-2.3.0.tgz",
|
||||
"integrity": "sha512-H7oLPV0P7+jgvrk+6mwwwBDmxTaxnu9HMXmloNLXwnNO0ZxZ31Orah2n8lU1eMPvsaowP2CX+USCgyovXfdOFQ=="
|
||||
},
|
||||
"to-fast-properties": {
|
||||
"version": "2.0.0",
|
||||
"dev": true
|
||||
|
|
|
@ -20,11 +20,11 @@
|
|||
"@fortawesome/free-solid-svg-icons": "^5.7.2",
|
||||
"@fortawesome/react-fontawesome": "^0.1.16",
|
||||
"@nexucis/fuzzy": "^0.3.0",
|
||||
"@nexucis/kvsearch": "^0.3.0",
|
||||
"bootstrap": "^4.6.1",
|
||||
"codemirror-promql": "0.19.0",
|
||||
"css.escape": "^1.5.1",
|
||||
"downshift": "^6.1.7",
|
||||
"i": "^0.3.7",
|
||||
"jquery": "^3.5.1",
|
||||
"jquery.flot.tooltip": "^0.9.0",
|
||||
"moment": "^2.24.0",
|
||||
|
@ -33,6 +33,7 @@
|
|||
"react": "^17.0.2",
|
||||
"react-copy-to-clipboard": "^5.0.4",
|
||||
"react-dom": "^17.0.2",
|
||||
"react-infinite-scroll-component": "^6.1.0",
|
||||
"react-resize-detector": "^6.7.6",
|
||||
"react-router-dom": "^5.2.1",
|
||||
"react-test-renderer": "^17.0.2",
|
||||
|
@ -75,7 +76,7 @@
|
|||
"@types/react-dom": "^17.0.11",
|
||||
"@types/react-resize-detector": "^6.1.0",
|
||||
"@types/react-router-dom": "^5.3.2",
|
||||
"@types/sanitize-html": "^2.5.0",
|
||||
"@types/sanitize-html": "^2.6.1",
|
||||
"@types/sinon": "^10.0.6",
|
||||
"@wojtekmaj/enzyme-adapter-react-17": "^0.6.5",
|
||||
"enzyme": "^3.11.0",
|
||||
|
|
|
@@ -0,0 +1,50 @@
import { ComponentType, useEffect, useState } from 'react';
import InfiniteScroll from 'react-infinite-scroll-component';

const initialNumberOfItemsDisplayed = 50;

export interface InfiniteScrollItemsProps<T> {
  items: T[];
}

interface CustomInfiniteScrollProps<T> {
  allItems: T[];
  child: ComponentType<InfiniteScrollItemsProps<T>>;
}

// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types
const CustomInfiniteScroll = <T extends unknown>({ allItems, child }: CustomInfiniteScrollProps<T>) => {
  const [items, setItems] = useState<T[]>(allItems.slice(0, 50));
  const [index, setIndex] = useState<number>(initialNumberOfItemsDisplayed);
  const [hasMore, setHasMore] = useState<boolean>(allItems.length > initialNumberOfItemsDisplayed);
  const Child = child;

  useEffect(() => {
    setItems(allItems.slice(0, initialNumberOfItemsDisplayed));
    setHasMore(allItems.length > initialNumberOfItemsDisplayed);
  }, [allItems]);

  const fetchMoreData = () => {
    if (items.length === allItems.length) {
      setHasMore(false);
    } else {
      const newIndex = index + initialNumberOfItemsDisplayed;
      setIndex(newIndex);
      setItems(allItems.slice(0, newIndex));
    }
  };

  return (
    <InfiniteScroll
      next={fetchMoreData}
      hasMore={hasMore}
      loader={<h4>loading...</h4>}
      dataLength={items.length}
      height={items.length > 25 ? '75vh' : ''}
    >
      <Child items={items} />
    </InfiniteScroll>
  );
};

export default CustomInfiniteScroll;
@ -63,7 +63,7 @@ const AlertsContent: FC<AlertsProps> = ({ groups = [], statsCount }) => {
|
|||
return (
|
||||
<Checkbox
|
||||
key={state}
|
||||
wrapperStyles={{ marginRight: 10 }}
|
||||
wrapperStyles={{ marginRight: 20 }}
|
||||
checked={filter[state]}
|
||||
id={`${state}-toggler`}
|
||||
onChange={toggleFilter(state)}
|
||||
|
|
|
@ -12,7 +12,7 @@ exports[`AlertsContent matches a snapshot 1`] = `
|
|||
onChange={[Function]}
|
||||
wrapperStyles={
|
||||
Object {
|
||||
"marginRight": 10,
|
||||
"marginRight": 20,
|
||||
}
|
||||
}
|
||||
>
|
||||
|
@ -35,7 +35,7 @@ exports[`AlertsContent matches a snapshot 1`] = `
|
|||
onChange={[Function]}
|
||||
wrapperStyles={
|
||||
Object {
|
||||
"marginRight": 10,
|
||||
"marginRight": 20,
|
||||
}
|
||||
}
|
||||
>
|
||||
|
@ -58,7 +58,7 @@ exports[`AlertsContent matches a snapshot 1`] = `
|
|||
onChange={[Function]}
|
||||
wrapperStyles={
|
||||
Object {
|
||||
"marginRight": 10,
|
||||
"marginRight": 20,
|
||||
}
|
||||
}
|
||||
>
|
||||
|
|
|
@ -2,6 +2,7 @@ import React, { FC, useState } from 'react';
|
|||
import { Badge, Table } from 'reactstrap';
|
||||
import { TargetLabels } from './Services';
|
||||
import { ToggleMoreLess } from '../../components/ToggleMoreLess';
|
||||
import CustomInfiniteScroll, { InfiniteScrollItemsProps } from '../../components/CustomInfiniteScroll';
|
||||
|
||||
interface LabelProps {
|
||||
value: TargetLabels[];
|
||||
|
@ -20,6 +21,33 @@ const formatLabels = (labels: Record<string, string> | string) => {
|
|||
});
|
||||
};
|
||||
|
||||
const LabelsTableContent: FC<InfiniteScrollItemsProps<TargetLabels>> = ({ items }) => {
|
||||
return (
|
||||
<Table size="sm" bordered hover striped>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Discovered Labels</th>
|
||||
<th>Target Labels</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{items.map((_, i) => {
|
||||
return (
|
||||
<tr key={i}>
|
||||
<td>{formatLabels(items[i].discoveredLabels)}</td>
|
||||
{items[i].isDropped ? (
|
||||
<td style={{ fontWeight: 'bold' }}>Dropped</td>
|
||||
) : (
|
||||
<td>{formatLabels(items[i].labels)}</td>
|
||||
)}
|
||||
</tr>
|
||||
);
|
||||
})}
|
||||
</tbody>
|
||||
</Table>
|
||||
);
|
||||
};
|
||||
|
||||
export const LabelsTable: FC<LabelProps> = ({ value, name }) => {
|
||||
const [showMore, setShowMore] = useState(false);
|
||||
|
||||
|
@ -35,30 +63,7 @@ export const LabelsTable: FC<LabelProps> = ({ value, name }) => {
|
|||
<span className="target-head">{name}</span>
|
||||
</ToggleMoreLess>
|
||||
</div>
|
||||
{showMore ? (
|
||||
<Table size="sm" bordered hover striped>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Discovered Labels</th>
|
||||
<th>Target Labels</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{value.map((_, i) => {
|
||||
return (
|
||||
<tr key={i}>
|
||||
<td>{formatLabels(value[i].discoveredLabels)}</td>
|
||||
{value[i].isDropped ? (
|
||||
<td style={{ fontWeight: 'bold' }}>Dropped</td>
|
||||
) : (
|
||||
<td>{formatLabels(value[i].labels)}</td>
|
||||
)}
|
||||
</tr>
|
||||
);
|
||||
})}
|
||||
</tbody>
|
||||
</Table>
|
||||
) : null}
|
||||
{showMore ? <CustomInfiniteScroll allItems={value} child={LabelsTableContent} /> : null}
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
import React, { FC } from 'react';
|
||||
import React, { ChangeEvent, FC, useEffect, useState } from 'react';
|
||||
import { useFetch } from '../../hooks/useFetch';
|
||||
import { LabelsTable } from './LabelsTable';
|
||||
import { DroppedTarget, Labels, Target } from '../targets/target';
|
||||
|
@ -7,6 +7,10 @@ import { withStatusIndicator } from '../../components/withStatusIndicator';
|
|||
import { mapObjEntries } from '../../utils';
|
||||
import { usePathPrefix } from '../../contexts/PathPrefixContext';
|
||||
import { API_PATH } from '../../constants/constants';
|
||||
import { KVSearch } from '@nexucis/kvsearch';
|
||||
import { Container, Input, InputGroup, InputGroupAddon, InputGroupText } from 'reactstrap';
|
||||
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
|
||||
import { faSearch } from '@fortawesome/free-solid-svg-icons';
|
||||
|
||||
interface ServiceMap {
|
||||
activeTargets: Target[];
|
||||
|
@ -19,6 +23,11 @@ export interface TargetLabels {
|
|||
isDropped: boolean;
|
||||
}
|
||||
|
||||
const kvSearch = new KVSearch({
|
||||
shouldSort: true,
|
||||
indexedKeys: ['labels', 'discoveredLabels', ['discoveredLabels', /.*/], ['labels', /.*/]],
|
||||
});
|
||||
|
||||
export const processSummary = (
|
||||
activeTargets: Target[],
|
||||
droppedTargets: DroppedTarget[]
|
||||
|
@ -82,14 +91,41 @@ export const processTargets = (activeTargets: Target[], droppedTargets: DroppedT
|
|||
};
|
||||
|
||||
export const ServiceDiscoveryContent: FC<ServiceMap> = ({ activeTargets, droppedTargets }) => {
|
||||
const targets = processSummary(activeTargets, droppedTargets);
|
||||
const labels = processTargets(activeTargets, droppedTargets);
|
||||
const [activeTargetList, setActiveTargetList] = useState(activeTargets);
|
||||
const [targetList, setTargetList] = useState(processSummary(activeTargets, droppedTargets));
|
||||
const [labelList, setLabelList] = useState(processTargets(activeTargets, droppedTargets));
|
||||
|
||||
const handleSearchChange = (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => {
|
||||
if (e.target.value !== '') {
|
||||
const result = kvSearch.filter(e.target.value.trim(), activeTargets);
|
||||
setActiveTargetList(
|
||||
result.map((value) => {
|
||||
return value.original as unknown as Target;
|
||||
})
|
||||
);
|
||||
} else {
|
||||
setActiveTargetList(activeTargets);
|
||||
}
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
setTargetList(processSummary(activeTargetList, droppedTargets));
|
||||
setLabelList(processTargets(activeTargetList, droppedTargets));
|
||||
}, [activeTargetList, droppedTargets]);
|
||||
|
||||
return (
|
||||
<>
|
||||
<h2>Service Discovery</h2>
|
||||
<Container>
|
||||
<InputGroup>
|
||||
<InputGroupAddon addonType="prepend">
|
||||
<InputGroupText>{<FontAwesomeIcon icon={faSearch} />}</InputGroupText>
|
||||
</InputGroupAddon>
|
||||
<Input autoFocus onChange={handleSearchChange} placeholder="Filter by labels" />
|
||||
</InputGroup>
|
||||
</Container>
|
||||
<ul>
|
||||
{mapObjEntries(targets, ([k, v]) => (
|
||||
{mapObjEntries(targetList, ([k, v]) => (
|
||||
<li key={k}>
|
||||
<a href={'#' + k}>
|
||||
{k} ({v.active} / {v.total} active targets)
|
||||
|
@ -98,7 +134,7 @@ export const ServiceDiscoveryContent: FC<ServiceMap> = ({ activeTargets, dropped
|
|||
))}
|
||||
</ul>
|
||||
<hr />
|
||||
{mapObjEntries(labels, ([k, v]) => {
|
||||
{mapObjEntries(labelList, ([k, v]) => {
|
||||
return <LabelsTable value={v} name={k} key={k} />;
|
||||
})}
|
||||
</>
|
||||
|
|
|
@ -0,0 +1,67 @@
|
|||
import React, { FC } from 'react';
|
||||
import { getColor, Target } from './target';
|
||||
import { Badge, Table } from 'reactstrap';
|
||||
import TargetLabels from './TargetLabels';
|
||||
import styles from './ScrapePoolPanel.module.css';
|
||||
import { formatRelative } from '../../utils';
|
||||
import { now } from 'moment';
|
||||
import TargetScrapeDuration from './TargetScrapeDuration';
|
||||
import EndpointLink from './EndpointLink';
|
||||
import CustomInfiniteScroll, { InfiniteScrollItemsProps } from '../../components/CustomInfiniteScroll';
|
||||
|
||||
const columns = ['Endpoint', 'State', 'Labels', 'Last Scrape', 'Scrape Duration', 'Error'];
|
||||
|
||||
interface ScrapePoolContentProps {
|
||||
targets: Target[];
|
||||
}
|
||||
|
||||
const ScrapePoolContentTable: FC<InfiniteScrollItemsProps<Target>> = ({ items }) => {
|
||||
return (
|
||||
<Table className={styles.table} size="sm" bordered hover striped>
|
||||
<thead>
|
||||
<tr key="header">
|
||||
{columns.map((column) => (
|
||||
<th key={column}>{column}</th>
|
||||
))}
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{items.map((target, index) => (
|
||||
<tr key={index}>
|
||||
<td className={styles.endpoint}>
|
||||
<EndpointLink endpoint={target.scrapeUrl} globalUrl={target.globalUrl} />
|
||||
</td>
|
||||
<td className={styles.state}>
|
||||
<Badge color={getColor(target.health)}>{target.health.toUpperCase()}</Badge>
|
||||
</td>
|
||||
<td className={styles.labels}>
|
||||
<TargetLabels
|
||||
discoveredLabels={target.discoveredLabels}
|
||||
labels={target.labels}
|
||||
scrapePool={target.scrapePool}
|
||||
idx={index}
|
||||
/>
|
||||
</td>
|
||||
<td className={styles['last-scrape']}>{formatRelative(target.lastScrape, now())}</td>
|
||||
<td className={styles['scrape-duration']}>
|
||||
<TargetScrapeDuration
|
||||
duration={target.lastScrapeDuration}
|
||||
scrapePool={target.scrapePool}
|
||||
idx={index}
|
||||
interval={target.scrapeInterval}
|
||||
timeout={target.scrapeTimeout}
|
||||
/>
|
||||
</td>
|
||||
<td className={styles.errors}>
|
||||
{target.lastError ? <span className="text-danger">{target.lastError}</span> : null}
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</Table>
|
||||
);
|
||||
};
|
||||
|
||||
export const ScrapePoolContent: FC<ScrapePoolContentProps> = ({ targets }) => {
|
||||
return <CustomInfiniteScroll allItems={targets} child={ScrapePoolContentTable} />;
|
||||
};
|
|
@ -3,8 +3,7 @@ import { mount, ReactWrapper } from 'enzyme';
|
|||
import { act } from 'react-dom/test-utils';
|
||||
import { Alert } from 'reactstrap';
|
||||
import { sampleApiResponse } from './__testdata__/testdata';
|
||||
import ScrapePoolList from './ScrapePoolList';
|
||||
import ScrapePoolPanel from './ScrapePoolPanel';
|
||||
import ScrapePoolList, { ScrapePoolPanel } from './ScrapePoolList';
|
||||
import { Target } from './target';
|
||||
import { FetchMock } from 'jest-fetch-mock/types';
|
||||
import { PathPrefixContext } from '../../contexts/PathPrefixContext';
|
||||
|
@ -48,7 +47,7 @@ describe('ScrapePoolList', () => {
|
|||
});
|
||||
const panels = scrapePoolList.find(ScrapePoolPanel);
|
||||
expect(panels).toHaveLength(3);
|
||||
const activeTargets: Target[] = sampleApiResponse.data.activeTargets as Target[];
|
||||
const activeTargets: Target[] = sampleApiResponse.data.activeTargets as unknown as Target[];
|
||||
activeTargets.forEach(({ scrapePool }: Target) => {
|
||||
const panel = scrapePoolList.find(ScrapePoolPanel).filterWhere((panel) => panel.prop('scrapePool') === scrapePool);
|
||||
expect(panel).toHaveLength(1);
|
||||
|
|
|
@ -1,26 +1,69 @@
|
|||
import React, { FC } from 'react';
|
||||
import Filter, { Expanded, FilterData } from './Filter';
|
||||
import { useFetch } from '../../hooks/useFetch';
|
||||
import { groupTargets, Target } from './target';
|
||||
import ScrapePoolPanel from './ScrapePoolPanel';
|
||||
import { withStatusIndicator } from '../../components/withStatusIndicator';
|
||||
import { KVSearch } from '@nexucis/kvsearch';
|
||||
import { usePathPrefix } from '../../contexts/PathPrefixContext';
|
||||
import { useFetch } from '../../hooks/useFetch';
|
||||
import { API_PATH } from '../../constants/constants';
|
||||
import { groupTargets, ScrapePool, ScrapePools, Target } from './target';
|
||||
import { withStatusIndicator } from '../../components/withStatusIndicator';
|
||||
import { ChangeEvent, FC, useEffect, useState } from 'react';
|
||||
import { Col, Collapse, Input, InputGroup, InputGroupAddon, InputGroupText, Row } from 'reactstrap';
|
||||
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
|
||||
import { faSearch } from '@fortawesome/free-solid-svg-icons';
|
||||
import { ScrapePoolContent } from './ScrapePoolContent';
|
||||
import Filter, { Expanded, FilterData } from './Filter';
|
||||
import { useLocalStorage } from '../../hooks/useLocalStorage';
|
||||
import styles from './ScrapePoolPanel.module.css';
|
||||
import { ToggleMoreLess } from '../../components/ToggleMoreLess';
|
||||
|
||||
interface ScrapePoolListProps {
|
||||
activeTargets: Target[];
|
||||
}
|
||||
|
||||
export const ScrapePoolContent: FC<ScrapePoolListProps> = ({ activeTargets }) => {
|
||||
const targetGroups = groupTargets(activeTargets);
|
||||
const kvSearch = new KVSearch({
|
||||
shouldSort: true,
|
||||
indexedKeys: ['labels', 'scrapePool', ['labels', /.*/]],
|
||||
});
|
||||
|
||||
interface PanelProps {
|
||||
scrapePool: string;
|
||||
targetGroup: ScrapePool;
|
||||
expanded: boolean;
|
||||
toggleExpanded: () => void;
|
||||
}
|
||||
|
||||
export const ScrapePoolPanel: FC<PanelProps> = (props: PanelProps) => {
|
||||
const modifier = props.targetGroup.upCount < props.targetGroup.targets.length ? 'danger' : 'normal';
|
||||
const id = `pool-${props.scrapePool}`;
|
||||
const anchorProps = {
|
||||
href: `#${id}`,
|
||||
id,
|
||||
};
|
||||
return (
|
||||
<div>
|
||||
<ToggleMoreLess event={props.toggleExpanded} showMore={props.expanded}>
|
||||
<a className={styles[modifier]} {...anchorProps}>
|
||||
{`${props.scrapePool} (${props.targetGroup.upCount}/${props.targetGroup.targets.length} up)`}
|
||||
</a>
|
||||
</ToggleMoreLess>
|
||||
<Collapse isOpen={props.expanded}>
|
||||
<ScrapePoolContent targets={props.targetGroup.targets} />
|
||||
</Collapse>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
// ScrapePoolListContent is taking care of every possible filter
|
||||
const ScrapePoolListContent: FC<ScrapePoolListProps> = ({ activeTargets }) => {
|
||||
const initialPoolList = groupTargets(activeTargets);
const [poolList, setPoolList] = useState<ScrapePools>(initialPoolList);
const [targetList, setTargetList] = useState(activeTargets);

const initialFilter: FilterData = {
showHealthy: true,
showUnhealthy: true,
};
const [filter, setFilter] = useLocalStorage('targets-page-filter', initialFilter);

const initialExpanded: Expanded = Object.keys(targetGroups).reduce(
const initialExpanded: Expanded = Object.keys(initialPoolList).reduce(
(acc: { [scrapePool: string]: boolean }, scrapePool: string) => ({
...acc,
[scrapePool]: true,

@ -28,14 +71,44 @@ export const ScrapePoolContent: FC<ScrapePoolListProps> = ({ activeTargets }) =>
{}
);
const [expanded, setExpanded] = useLocalStorage('targets-page-expansion-state', initialExpanded);

const { showHealthy, showUnhealthy } = filter;

const handleSearchChange = (e: ChangeEvent<HTMLTextAreaElement | HTMLInputElement>) => {
if (e.target.value !== '') {
const result = kvSearch.filter(e.target.value.trim(), activeTargets);
setTargetList(
result.map((value) => {
return value.original as unknown as Target;
})
);
} else {
setTargetList(activeTargets);
}
};

useEffect(() => {
const list = targetList.filter((t) => showHealthy || t.health.toLowerCase() !== 'up');
setPoolList(groupTargets(list));
}, [showHealthy, targetList]);

return (
<>
<Row xs="4" className="align-items-center">
<Col>
<Filter filter={filter} setFilter={setFilter} expanded={expanded} setExpanded={setExpanded} />
{Object.keys(targetGroups)
</Col>
<Col xs="6">
<InputGroup>
<InputGroupAddon addonType="prepend">
<InputGroupText>{<FontAwesomeIcon icon={faSearch} />}</InputGroupText>
</InputGroupAddon>
<Input autoFocus onChange={handleSearchChange} placeholder="Filter by endpoint or labels" />
</InputGroup>
</Col>
</Row>
{Object.keys(poolList)
.filter((scrapePool) => {
const targetGroup = targetGroups[scrapePool];
const targetGroup = poolList[scrapePool];
const isHealthy = targetGroup.upCount === targetGroup.targets.length;
return (isHealthy && showHealthy) || (!isHealthy && showUnhealthy);
})

@ -43,7 +116,7 @@ export const ScrapePoolContent: FC<ScrapePoolListProps> = ({ activeTargets }) =>
<ScrapePoolPanel
key={scrapePool}
scrapePool={scrapePool}
targetGroup={targetGroups[scrapePool]}
targetGroup={poolList[scrapePool]}
expanded={expanded[scrapePool]}
toggleExpanded={(): void => setExpanded({ ...expanded, [scrapePool]: !expanded[scrapePool] })}
/>

@ -51,11 +124,10 @@ export const ScrapePoolContent: FC<ScrapePoolListProps> = ({ activeTargets }) =>
</>
);
};
ScrapePoolContent.displayName = 'ScrapePoolContent';

const ScrapePoolListWithStatusIndicator = withStatusIndicator(ScrapePoolContent);
const ScrapePoolListWithStatusIndicator = withStatusIndicator(ScrapePoolListContent);

const ScrapePoolList: FC = () => {
export const ScrapePoolList: FC = () => {
const pathPrefix = usePathPrefix();
const { response, error, isLoading } = useFetch<ScrapePoolListProps>(`${pathPrefix}/${API_PATH}/targets?state=active`);
const { status: responseStatus } = response;
@ -1,137 +0,0 @@
import React from 'react';
import { mount, shallow } from 'enzyme';
import { targetGroups } from './__testdata__/testdata';
import ScrapePoolPanel, { columns } from './ScrapePoolPanel';
import { Button, Collapse, Table, Badge } from 'reactstrap';
import { Target, getColor } from './target';
import EndpointLink from './EndpointLink';
import TargetLabels from './TargetLabels';
import sinon from 'sinon';

describe('ScrapePoolPanel', () => {
const defaultProps = {
scrapePool: 'blackbox',
targetGroup: targetGroups.blackbox,
expanded: true,
toggleExpanded: sinon.spy(),
};
const scrapePoolPanel = shallow(<ScrapePoolPanel {...defaultProps} />);

it('renders a container', () => {
const div = scrapePoolPanel.find('div').filterWhere((elem) => elem.hasClass('container'));
expect(div).toHaveLength(1);
});

describe('Header', () => {
it('renders an anchor with up count and danger color if upCount < targetsCount', () => {
const anchor = scrapePoolPanel.find('a');
expect(anchor).toHaveLength(1);
expect(anchor.prop('id')).toEqual('pool-blackbox');
expect(anchor.prop('href')).toEqual('#pool-blackbox');
expect(anchor.text()).toEqual('blackbox (2/3 up)');
expect(anchor.prop('className')).toEqual('danger');
});

it('renders an anchor with up count and normal color if upCount == targetsCount', () => {
const props = {
...defaultProps,
scrapePool: 'prometheus',
targetGroup: targetGroups.prometheus,
};
const scrapePoolPanel = shallow(<ScrapePoolPanel {...props} />);
const anchor = scrapePoolPanel.find('a');
expect(anchor).toHaveLength(1);
expect(anchor.prop('id')).toEqual('pool-prometheus');
expect(anchor.prop('href')).toEqual('#pool-prometheus');
expect(anchor.text()).toEqual('prometheus (1/1 up)');
expect(anchor.prop('className')).toEqual('normal');
});

it('renders a show more btn if collapsed', () => {
const props = {
...defaultProps,
scrapePool: 'prometheus',
targetGroup: targetGroups.prometheus,
toggleExpanded: sinon.spy(),
};
const div = document.createElement('div');
div.id = `series-labels-prometheus-0`;
document.body.appendChild(div);
const div2 = document.createElement('div');
div2.id = `scrape-duration-prometheus-0`;
document.body.appendChild(div2);
const scrapePoolPanel = mount(<ScrapePoolPanel {...props} />);

const btn = scrapePoolPanel.find(Button);
btn.simulate('click');
expect(props.toggleExpanded.calledOnce).toBe(true);
});
});

it('renders a Collapse component', () => {
const collapse = scrapePoolPanel.find(Collapse);
expect(collapse.prop('isOpen')).toBe(true);
});

describe('Table', () => {
it('renders a table', () => {
const table = scrapePoolPanel.find(Table);
const headers = table.find('th');
expect(table).toHaveLength(1);
expect(headers).toHaveLength(6);
columns.forEach((col) => {
expect(headers.contains(col));
});
});

describe('for each target', () => {
const table = scrapePoolPanel.find(Table);
defaultProps.targetGroup.targets.forEach(
({ discoveredLabels, labels, scrapeUrl, lastError, health }: Target, idx: number) => {
const row = table.find('tr').at(idx + 1);

it('renders an EndpointLink with the scrapeUrl', () => {
const link = row.find(EndpointLink);
expect(link).toHaveLength(1);
expect(link.prop('endpoint')).toEqual(scrapeUrl);
});

it('renders a badge for health', () => {
const td = row.find('td').filterWhere((elem) => Boolean(elem.hasClass('state')));
const badge = td.find(Badge);
expect(badge).toHaveLength(1);
expect(badge.prop('color')).toEqual(getColor(health));
expect(badge.children().text()).toEqual(health.toUpperCase());
});

it('renders series labels', () => {
const targetLabels = row.find(TargetLabels);
expect(targetLabels).toHaveLength(1);
expect(targetLabels.prop('discoveredLabels')).toEqual(discoveredLabels);
expect(targetLabels.prop('labels')).toEqual(labels);
});

it('renders last scrape time', () => {
const lastScrapeCell = row.find('td').filterWhere((elem) => Boolean(elem.hasClass('last-scrape')));
expect(lastScrapeCell).toHaveLength(1);
});

it('renders last scrape duration', () => {
const lastScrapeCell = row.find('td').filterWhere((elem) => Boolean(elem.hasClass('scrape-duration')));
expect(lastScrapeCell).toHaveLength(1);
});

it('renders a badge for Errors', () => {
const td = row.find('td').filterWhere((elem) => Boolean(elem.hasClass('errors')));
const badge = td.find(Badge);
expect(badge).toHaveLength(lastError ? 1 : 0);
if (lastError) {
expect(badge.prop('color')).toEqual('danger');
expect(badge.children().text()).toEqual(lastError);
}
});
}
);
});
});
});
@ -1,95 +0,0 @@
import React, { FC } from 'react';
import { ScrapePool, getColor } from './target';
import { Collapse, Table, Badge } from 'reactstrap';
import styles from './ScrapePoolPanel.module.css';
import { Target } from './target';
import EndpointLink from './EndpointLink';
import TargetLabels from './TargetLabels';
import TargetScrapeDuration from './TargetScrapeDuration';
import { now } from 'moment';
import { ToggleMoreLess } from '../../components/ToggleMoreLess';
import { formatRelative } from '../../utils';

interface PanelProps {
scrapePool: string;
targetGroup: ScrapePool;
expanded: boolean;
toggleExpanded: () => void;
}

export const columns = ['Endpoint', 'State', 'Labels', 'Last Scrape', 'Scrape Duration', 'Error'];

const ScrapePoolPanel: FC<PanelProps> = ({ scrapePool, targetGroup, expanded, toggleExpanded }) => {
const modifier = targetGroup.upCount < targetGroup.targets.length ? 'danger' : 'normal';
const id = `pool-${scrapePool}`;
const anchorProps = {
href: `#${id}`,
id,
};

return (
<div className={styles.container}>
<ToggleMoreLess event={toggleExpanded} showMore={expanded}>
<a className={styles[modifier]} {...anchorProps}>
{`${scrapePool} (${targetGroup.upCount}/${targetGroup.targets.length} up)`}
</a>
</ToggleMoreLess>
<Collapse isOpen={expanded}>
<Table className={styles.table} size="sm" bordered hover striped>
<thead>
<tr key="header">
{columns.map((column) => (
<th key={column}>{column}</th>
))}
</tr>
</thead>
<tbody>
{targetGroup.targets.map((target: Target, idx: number) => {
const {
discoveredLabels,
labels,
scrapePool,
scrapeUrl,
globalUrl,
lastError,
lastScrape,
lastScrapeDuration,
health,
scrapeInterval,
scrapeTimeout,
} = target;
const color = getColor(health);

return (
<tr key={scrapeUrl}>
<td className={styles.endpoint}>
<EndpointLink endpoint={scrapeUrl} globalUrl={globalUrl} />
</td>
<td className={styles.state}>
<Badge color={color}>{health.toUpperCase()}</Badge>
</td>
<td className={styles.labels}>
<TargetLabels discoveredLabels={discoveredLabels} labels={labels} scrapePool={scrapePool} idx={idx} />
</td>
<td className={styles['last-scrape']}>{formatRelative(lastScrape, now())}</td>
<td className={styles['scrape-duration']}>
<TargetScrapeDuration
duration={lastScrapeDuration}
scrapePool={scrapePool}
idx={idx}
interval={scrapeInterval}
timeout={scrapeTimeout}
/>
</td>
<td className={styles.errors}>{lastError ? <span className="text-danger">{lastError}</span> : null}</td>
</tr>
);
})}
</tbody>
</Table>
</Collapse>
</div>
);
};

export default ScrapePoolPanel;
@ -33,10 +33,16 @@ const TargetLabels: FC<TargetLabelsProps> = ({ discoveredLabels, labels, idx, sc
);
})}
</div>
<Tooltip isOpen={tooltipOpen} target={CSS.escape(id)} toggle={toggle} style={{ maxWidth: 'none', textAlign: 'left' }}>
<Tooltip
isOpen={tooltipOpen}
target={CSS.escape(id)}
toggle={toggle}
placement={'right-end'}
style={{ maxWidth: 'none', textAlign: 'left' }}
>
<b>Before relabeling:</b>
{formatLabels(discoveredLabels).map((s: string, idx: number) => (
<Fragment key={idx}>
{formatLabels(discoveredLabels).map((s: string, labelIndex: number) => (
<Fragment key={labelIndex}>
<br />
<span className={styles.discovered}>{s}</span>
</Fragment>
@ -37,7 +37,7 @@ exports[`targetLabels renders discovered labels 1`] = `
<Tooltip
autohide={true}
isOpen={false}
placement="top"
placement="right-end"
placementPrefix="bs-tooltip"
style={
Object {
@ -2,7 +2,7 @@ export interface Labels {
[key: string]: string;
}

export interface Target {
export type Target = {
discoveredLabels: Labels;
labels: Labels;
scrapePool: string;

@ -14,7 +14,7 @@ export interface Target {
health: string;
scrapeInterval: string;
scrapeTimeout: string;
}
};

export interface DroppedTarget {
discoveredLabels: Labels;
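The targets page groups these entries by scrape pool before rendering. As a rough illustration of the shapes involved, the sketch below shows how a grouping helper could build the `ScrapePools` map consumed by `ScrapePoolList` above; the actual `ScrapePool`/`ScrapePools` definitions and any `groupTargets` helper in target.ts may differ, so treat this as an assumed example based only on the fields the list component uses (`upCount`, `targets`).

// Assumed shapes, inferred from how the list component uses them.
export interface ScrapePool {
  upCount: number;
  targets: Target[];
}

export interface ScrapePools {
  [scrapePool: string]: ScrapePool;
}

// Hypothetical grouping helper: buckets targets by scrapePool and counts
// how many of them report health 'up'.
export const groupTargets = (targets: Target[]): ScrapePools => {
  const pools: ScrapePools = {};
  for (const target of targets) {
    if (!pools[target.scrapePool]) {
      pools[target.scrapePool] = { upCount: 0, targets: [] };
    }
    pools[target.scrapePool].targets.push(target);
    if (target.health.toLowerCase() === 'up') {
      pools[target.scrapePool].upCount++;
    }
  }
  return pools;
};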
@ -257,7 +257,7 @@ type Options struct {
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
RemoteWriteReceiver bool
EnableRemoteWriteReceiver bool
IsAgent bool

Gatherer prometheus.Gatherer

@ -314,7 +314,7 @@ func New(logger log.Logger, o *Options) *Handler {
FactoryRr := func(_ context.Context) api_v1.RulesRetriever { return h.ruleManager }

var app storage.Appendable
if o.RemoteWriteReceiver {
if o.EnableRemoteWriteReceiver {
app = h.storage
}