From b5389192583bac5171716e6467a370f29bda3c43 Mon Sep 17 00:00:00 2001 From: tyltr Date: Wed, 31 Jan 2024 20:27:23 +0800 Subject: [PATCH 01/81] remove redundant code Signed-off-by: tyltr --- discovery/legacymanager/manager_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/discovery/legacymanager/manager_test.go b/discovery/legacymanager/manager_test.go index 6fbecabc2..767532168 100644 --- a/discovery/legacymanager/manager_test.go +++ b/discovery/legacymanager/manager_test.go @@ -1091,7 +1091,6 @@ func TestCoordinationWithReceiver(t *testing.T) { } for _, tc := range testCases { - tc := tc t.Run(tc.title, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() From d5f68872943062d4535ef8cffc53b4613a541d11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=F0=9F=8C=B2=20Harry=20=F0=9F=8C=8A=20John=20=F0=9F=8F=94?= Date: Wed, 15 May 2024 11:39:54 -0700 Subject: [PATCH 02/81] Pass limit param as hint to storage.Querier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 🌲 Harry 🌊 John 🏔 --- docs/querying/api.md | 6 +++--- promql/engine_test.go | 4 ++-- storage/fanout_test.go | 4 ++-- storage/interface.go | 18 ++++++++++++++---- storage/merge.go | 16 ++++++++-------- storage/merge_test.go | 10 +++++----- storage/noop.go | 8 ++++---- storage/remote/read.go | 4 ++-- storage/secondary.go | 8 ++++---- tsdb/db_test.go | 6 +++--- tsdb/querier.go | 4 ++-- tsdb/querier_test.go | 2 +- web/api/v1/api.go | 39 +++++++++++++++++++++++++++++---------- web/api/v1/api_test.go | 24 ++++++++++++++++++++++-- web/api/v1/errors_test.go | 4 ++-- 15 files changed, 103 insertions(+), 54 deletions(-) diff --git a/docs/querying/api.md b/docs/querying/api.md index 71e01b3b9..a1c3cb8fe 100644 --- a/docs/querying/api.md +++ b/docs/querying/api.md @@ -256,7 +256,7 @@ URL query parameters: series to return. At least one `match[]` argument must be provided. - `start=`: Start timestamp. - `end=`: End timestamp. -- `limit=`: Maximum number of returned series. Optional. +- `limit=`: Maximum number of returned series. Optional. 0 means disabled. You can URL-encode these parameters directly in the request body by using the `POST` method and `Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large @@ -307,7 +307,7 @@ URL query parameters: - `end=`: End timestamp. Optional. - `match[]=`: Repeated series selector argument that selects the series from which to read the label names. Optional. -- `limit=`: Maximum number of returned series. Optional. +- `limit=`: Maximum number of returned series. Optional. 0 means disabled. The `data` section of the JSON response is a list of string label names. @@ -358,7 +358,7 @@ URL query parameters: - `end=`: End timestamp. Optional. - `match[]=`: Repeated series selector argument that selects the series from which to read the label values. Optional. -- `limit=`: Maximum number of returned series. Optional. +- `limit=`: Maximum number of returned series. Optional. 0 means disabled. The `data` section of the JSON response is a list of string label values. 
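All three endpoints documented above take the same optional `limit` parameter, and a response that was cut short carries a "results truncated due to limit" warning. As a quick illustration of the new parameter — a minimal sketch only; the server address and selector below are assumptions for illustration, not part of the patch:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Assumed local Prometheus instance; adjust to your deployment.
	endpoint := "http://localhost:9090/api/v1/labels"

	params := url.Values{}
	params.Set("match[]", "up") // only label names of series matching this selector
	params.Set("limit", "100")  // return at most 100 names; "0" (or omitting it) disables the limit

	resp, err := http.Get(endpoint + "?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Standard API envelope; a "warnings" entry signals truncation.
	fmt.Println(string(body))
}
```

Internally, the patch forwards this limit to the storage layer as a hint (see the `LabelHints` struct and the `toHintLimit` helper in the hunks below, which requests one extra result so truncation can be detected) while the API layer enforces the cut-off and emits the warning.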
diff --git a/promql/engine_test.go b/promql/engine_test.go index 4e321a6c3..503db4187 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -236,11 +236,11 @@ func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*lab return errSeriesSet{err: q.err} } -func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } func (*errQuerier) Close() error { return nil } diff --git a/storage/fanout_test.go b/storage/fanout_test.go index 913e2fe24..19bce6172 100644 --- a/storage/fanout_test.go +++ b/storage/fanout_test.go @@ -236,11 +236,11 @@ func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels return storage.ErrSeriesSet(errSelect) } -func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errors.New("label values error") } -func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, errors.New("label names error") } diff --git a/storage/interface.go b/storage/interface.go index 493c2d689..f85f985e9 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -122,11 +122,11 @@ type MockQuerier struct { SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet } -func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -161,12 +161,12 @@ type LabelQuerier interface { // It is not safe to use the strings beyond the lifetime of the querier. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. - LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) + LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) // LabelNames returns all the unique label names present in the block in sorted order. // If matchers are specified the returned result set is reduced // to label names of metrics matching the matchers. 
- LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) + LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) // Close releases the resources of the Querier. Close() error @@ -190,6 +190,9 @@ type SelectHints struct { Start int64 // Start time in milliseconds for this select. End int64 // End time in milliseconds for this select. + // Maximum number of results returned. Use a value of 0 to disable. + Limit int + Step int64 // Query step size in milliseconds. Func string // String representation of surrounding function or aggregation. @@ -217,6 +220,13 @@ type SelectHints struct { DisableTrimming bool } +// LabelHints specifies hints passed for label reads. +// This is used only as an option for implementation to use. +type LabelHints struct { + // Maximum number of results returned. Use a value of 0 to disable. + Limit int +} + // TODO(bwplotka): Move to promql/engine_test.go? // QueryableFunc is an adapter to allow the use of ordinary functions as // Queryables. It follows the idea of http.HandlerFunc. diff --git a/storage/merge.go b/storage/merge.go index 885560022..35c0c44ba 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -161,8 +161,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ // LabelValues returns all potential values for a label name. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. -func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - res, ws, err := q.lvals(ctx, q.queriers, name, matchers...) +func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...) if err != nil { return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) } @@ -170,22 +170,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matc } // lvals performs merge sort for LabelValues from multiple queriers. -func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { if lq.Len() == 0 { return nil, nil, nil } if lq.Len() == 1 { - return lq.Get(0).LabelValues(ctx, n, matchers...) + return lq.Get(0).LabelValues(ctx, n, hints, matchers...) } a, b := lq.SplitByHalf() var ws annotations.Annotations - s1, w, err := q.lvals(ctx, a, n, matchers...) + s1, w, err := q.lvals(ctx, a, n, hints, matchers...) ws.Merge(w) if err != nil { return nil, ws, err } - s2, ws, err := q.lvals(ctx, b, n, matchers...) + s2, ws, err := q.lvals(ctx, b, n, hints, matchers...) ws.Merge(w) if err != nil { return nil, ws, err @@ -221,13 +221,13 @@ func mergeStrings(a, b []string) []string { } // LabelNames returns all the unique label names present in all queriers in sorted order. 
-func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { var ( labelNamesMap = make(map[string]struct{}) warnings annotations.Annotations ) for _, querier := range q.queriers { - names, wrn, err := querier.LabelNames(ctx, matchers...) + names, wrn, err := querier.LabelNames(ctx, hints, matchers...) if wrn != nil { // TODO(bwplotka): We could potentially wrap warnings. warnings.Merge(wrn) diff --git a/storage/merge_test.go b/storage/merge_test.go index 0e63affbb..e108e7146 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1361,7 +1361,7 @@ func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err} } -func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { m.mtx.Lock() m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ name: name, @@ -1371,7 +1371,7 @@ func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matcher return m.resp, m.warnings, m.err } -func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { m.mtx.Lock() m.labelNamesCalls++ m.mtx.Unlock() @@ -1560,7 +1560,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { } }) t.Run("LabelNames", func(t *testing.T) { - res, w, err := q.LabelNames(ctx) + res, w, err := q.LabelNames(ctx, nil) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) @@ -1575,7 +1575,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { } }) t.Run("LabelValues", func(t *testing.T) { - res, w, err := q.LabelValues(ctx, "test") + res, w, err := q.LabelValues(ctx, "test", nil) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) @@ -1591,7 +1591,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }) t.Run("LabelValuesWithMatchers", func(t *testing.T) { matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") - res, w, err := q.LabelValues(ctx, "test2", matcher) + res, w, err := q.LabelValues(ctx, "test2", nil, matcher) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") require.Equal(t, tcase.expectedLabels, res) diff --git a/storage/noop.go b/storage/noop.go index be5741ddd..f5092da7c 100644 --- a/storage/noop.go +++ b/storage/noop.go @@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche return NoopSeriesSet() } -func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (noopQuerier) LabelValues(context.Context, string, 
*LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } @@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M return NoopChunkedSeriesSet() } -func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } -func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, nil } diff --git a/storage/remote/read.go b/storage/remote/read.go index 723030091..e54b14f1e 100644 --- a/storage/remote/read.go +++ b/storage/remote/read.go @@ -210,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s } // LabelValues implements storage.Querier and is a noop. -func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } // LabelNames implements storage.Querier and is a noop. -func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 return nil, nil, errors.New("not implemented") } diff --git a/storage/secondary.go b/storage/secondary.go index 44d978183..1cf8024b6 100644 --- a/storage/secondary.go +++ b/storage/secondary.go @@ -49,16 +49,16 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier { return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)} } -func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - vals, w, err := s.genericQuerier.LabelValues(ctx, name, matchers...) +func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...) if err != nil { return nil, w.Add(err), nil } return vals, w, nil } -func (s *secondaryQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - names, w, err := s.genericQuerier.LabelNames(ctx, matchers...) +func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...) 
if err != nil { return nil, w.Add(err), nil } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index 3d2fb2d99..a96a2f9a6 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -1001,7 +1001,7 @@ func TestWALFlushedOnDBClose(t *testing.T) { q, err := db.Querier(0, 1) require.NoError(t, err) - values, ws, err := q.LabelValues(ctx, "labelname") + values, ws, err := q.LabelValues(ctx, "labelname", nil) require.NoError(t, err) require.Empty(t, ws) require.Equal(t, []string{"labelvalue"}, values) @@ -1976,7 +1976,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) { defer q.Close() // The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block. - b, ws, err := q.LabelValues(ctx, "blockID") + b, ws, err := q.LabelValues(ctx, "blockID", nil) require.NoError(t, err) var nilAnnotations annotations.Annotations require.Equal(t, nilAnnotations, ws) @@ -2288,7 +2288,7 @@ func TestDB_LabelNames(t *testing.T) { q, err := db.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) var ws annotations.Annotations - labelNames, ws, err = q.LabelNames(ctx) + labelNames, ws, err = q.LabelNames(ctx, nil) require.NoError(t, err) require.Empty(t, ws) require.NoError(t, q.Close()) diff --git a/tsdb/querier.go b/tsdb/querier.go index fb4a87cc8..910c2d7fc 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er }, nil } -func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, err := q.index.SortedLabelValues(ctx, name, matchers...) return res, nil, err } -func (q *blockBaseQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { res, err := q.index.LabelNames(ctx, matchers...) return res, nil, err } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index a1af49465..ffdf8dc02 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3022,7 +3022,7 @@ func TestQuerierIndexQueriesRace(t *testing.T) { q, err := db.Querier(math.MinInt64, math.MaxInt64) require.NoError(t, err) - values, _, err := q.LabelValues(ctx, "seq", c.matchers...) + values, _, err := q.LabelValues(ctx, "seq", nil, c.matchers...) require.NoError(t, err) require.Emptyf(t, values, `label values for label "seq" should be empty`) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index b95ff25cf..4468a3263 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -658,6 +658,10 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } + hints := &storage.LabelHints{ + Limit: toHintLimit(limit), + } + q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { return apiFuncResult{nil, returnAPIError(err), nil, nil} @@ -672,7 +676,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { labelNamesSet := make(map[string]struct{}) for _, matchers := range matcherSets { - vals, callWarnings, err := q.LabelNames(r.Context(), matchers...) 
+ vals, callWarnings, err := q.LabelNames(r.Context(), hints, matchers...) if err != nil { return apiFuncResult{nil, returnAPIError(err), warnings, nil} } @@ -694,7 +698,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { if len(matcherSets) == 1 { matchers = matcherSets[0] } - names, warnings, err = q.LabelNames(r.Context(), matchers...) + names, warnings, err = q.LabelNames(r.Context(), hints, matchers...) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} } @@ -704,7 +708,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { names = []string{} } - if len(names) > limit { + if limit > 0 && len(names) > limit { names = names[:limit] warnings = warnings.Add(errors.New("results truncated due to limit")) } @@ -738,6 +742,10 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } + hints := &storage.LabelHints{ + Limit: toHintLimit(limit), + } + q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} @@ -762,7 +770,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { var callWarnings annotations.Annotations labelValuesSet := make(map[string]struct{}) for _, matchers := range matcherSets { - vals, callWarnings, err = q.LabelValues(ctx, name, matchers...) + vals, callWarnings, err = q.LabelValues(ctx, name, hints, matchers...) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} } @@ -781,7 +789,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { if len(matcherSets) == 1 { matchers = matcherSets[0] } - vals, warnings, err = q.LabelValues(ctx, name, matchers...) + vals, warnings, err = q.LabelValues(ctx, name, hints, matchers...) if err != nil { return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} } @@ -793,7 +801,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { slices.Sort(vals) - if len(vals) > limit { + if limit > 0 && len(vals) > limit { vals = vals[:limit] warnings = warnings.Add(errors.New("results truncated due to limit")) } @@ -863,6 +871,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { Start: timestamp.FromTime(start), End: timestamp.FromTime(end), Func: "series", // There is no series function, this token is used for lookups that don't need samples. + Limit: toHintLimit(limit), } var set storage.SeriesSet @@ -889,7 +898,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { } metrics = append(metrics, set.At().Labels()) - if len(metrics) > limit { + if limit > 0 && len(metrics) > limit { metrics = metrics[:limit] warnings.Add(errors.New("results truncated due to limit")) return apiFuncResult{metrics, nil, warnings, closer} @@ -1898,8 +1907,8 @@ OUTER: return matcherSets, nil } +// parseLimitParam returning 0 means no limit is to be applied. func parseLimitParam(limitStr string) (limit int, err error) { - limit = math.MaxInt if limitStr == "" { return limit, nil } @@ -1908,9 +1917,19 @@ func parseLimitParam(limitStr string) (limit int, err error) { if err != nil { return limit, err } - if limit <= 0 { - return limit, errors.New("limit must be positive") + if limit < 0 { + return limit, errors.New("limit must be non-negative") } return limit, nil } + +// toHintLimit increases the API limit, as returned by parseLimitParam, by 1. 
+// This allows for emitting warnings when the results are truncated. +func toHintLimit(limit int) int { + // 0 means no limit and avoid int overflow + if limit > 0 && limit < math.MaxInt { + return limit + 1 + } + return limit +} diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index b30890893..f0aed49d6 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -739,13 +739,16 @@ func TestLabelNames(t *testing.T) { api := &API{ Queryable: storage, } - request := func(method string, matchers ...string) (*http.Request, error) { + request := func(method, limit string, matchers ...string) (*http.Request, error) { u, err := url.Parse("http://example.com") require.NoError(t, err) q := u.Query() for _, matcher := range matchers { q.Add("match[]", matcher) } + if limit != "" { + q.Add("limit", limit) + } u.RawQuery = q.Encode() r, err := http.NewRequest(method, u.String(), nil) @@ -759,6 +762,7 @@ func TestLabelNames(t *testing.T) { name string api *API matchers []string + limit string expected []string expectedErrorType errorType }{ @@ -773,6 +777,13 @@ func TestLabelNames(t *testing.T) { expected: []string{"__name__", "abc", "foo", "xyz"}, api: api, }, + { + name: "non empty label matcher with limit", + matchers: []string{`{foo=~".+"}`}, + expected: []string{"__name__", "abc"}, + limit: "2", + api: api, + }, { name: "exact label matcher", matchers: []string{`{foo="boo"}`}, @@ -805,7 +816,7 @@ func TestLabelNames(t *testing.T) { t.Run(tc.name, func(t *testing.T) { for _, method := range []string{http.MethodGet, http.MethodPost} { ctx := context.Background() - req, err := request(method, tc.matchers...) + req, err := request(method, tc.limit, tc.matchers...) require.NoError(t, err) res := tc.api.labelNames(req.WithContext(ctx)) assertAPIError(t, res.err, tc.expectedErrorType) @@ -1430,6 +1441,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E responseLen: 2, // API does not specify which particular value will come back. warningsCount: 0, // No warnings if limit isn't exceeded. }, + { + endpoint: api.series, + query: url.Values{ + "match[]": []string{"test_metric1"}, + "limit": []string{"0"}, + }, + responseLen: 2, // API does not specify which particular value will come back. + warningsCount: 0, // No warnings if limit isn't exceeded. + }, // Missing match[] query params in series requests. { endpoint: api.series, diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index e76a1a3d3..2a2b847cf 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -170,11 +170,11 @@ type errorTestQuerier struct { err error } -func (t errorTestQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (t errorTestQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, t.err } -func (t errorTestQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { +func (t errorTestQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { return nil, nil, t.err } From d8a9d69f81d4bfd2183289263e74333ac4d628c1 Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Tue, 25 Jun 2024 09:15:09 +0200 Subject: [PATCH 03/81] ci: Add job to report build_all status This should enable proper status reporting of matrix jobs for release branches. 
See also https://github.com/orgs/community/discussions/4324. The new job will succeed if all build_all jobs succeeded and fail if there is a single failed or cancelled build job. This only runs for PRs, not for release tags or the main branch, unlike the build_all step. Signed-off-by: Jan Fajerski --- .github/workflows/ci.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 978218dba..995aef675 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -143,6 +143,18 @@ jobs: with: parallelism: 12 thread: ${{ matrix.thread }} + build_all_status: + name: Report status of build Prometheus for all architectures + runs-on: ubuntu-latest + needs: [build_all] + if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-') + steps: + - name: Successful build + if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }} + run: exit 0 + - name: Failing or cancelled build + if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} + run: exit 1 check_generated_parser: name: Check generated parser runs-on: ubuntu-latest From f090ab1baf87447935de3871b2a18a8239295aeb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:02:26 +0000 Subject: [PATCH 04/81] build(deps): bump sass from 1.77.4 to 1.77.6 in /web/ui Bumps [sass](https://github.com/sass/dart-sass) from 1.77.4 to 1.77.6. - [Release notes](https://github.com/sass/dart-sass/releases) - [Changelog](https://github.com/sass/dart-sass/blob/main/CHANGELOG.md) - [Commits](https://github.com/sass/dart-sass/compare/1.77.4...1.77.6) --- updated-dependencies: - dependency-name: sass dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 8 ++++---- web/ui/react-app/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 0458e961d..2e27b6a96 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -16807,9 +16807,9 @@ "license": "CC0-1.0" }, "node_modules/sass": { - "version": "1.77.4", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.4.tgz", - "integrity": "sha512-vcF3Ckow6g939GMA4PeU7b2K/9FALXk2KF9J87txdHzXbUF9XRQRwSxcAs/fGaTnJeBFd7UoV22j3lzMLdM0Pw==", + "version": "1.77.6", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz", + "integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -19368,7 +19368,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.13.0", - "sass": "1.77.4", + "sass": "1.77.6", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 1997a22af..045fb5dcd 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -38,7 +38,7 @@ "react-test-renderer": "^17.0.2", "reactstrap": "^8.10.1", "sanitize-html": "^2.13.0", - "sass": "1.77.4", + "sass": "1.77.6", "tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-core": "^5.19.3" }, From 65f57ca34177b1d89ab94f5efad8f234131d7147 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:02:45 +0000 Subject: [PATCH 05/81] build(deps-dev): bump @types/node from 20.14.2 to 20.14.9 in /web/ui Bumps [@types/node](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node) from 20.14.2 to 20.14.9. - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases) - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/node) --- updated-dependencies: - dependency-name: "@types/node" dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 8 ++++---- web/ui/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 0458e961d..90819b9b6 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -13,7 +13,7 @@ ], "devDependencies": { "@types/jest": "^29.5.12", - "@types/node": "^20.14.2", + "@types/node": "^20.14.9", "eslint-config-prettier": "^9.1.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", @@ -4199,9 +4199,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "20.14.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.2.tgz", - "integrity": "sha512-xyu6WAMVwv6AKFLB+e/7ySZVr/0zLCzOa7rSpq6jNwpqOrUbcACDWC+53d4n2QHOnDou0fbIsg8wZu/sxrnI4Q==", + "version": "20.14.9", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.9.tgz", + "integrity": "sha512-06OCtnTXtWOZBJlRApleWndH4JsRVs1pDCc8dLSQp+7PpUpX3ePdHyeNSFTeSe7FtKyQkrlPvHwJOW3SLd8Oyg==", "dependencies": { "undici-types": "~5.26.4" } diff --git a/web/ui/package.json b/web/ui/package.json index 355b610eb..49367d9b1 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -17,7 +17,7 @@ }, "devDependencies": { "@types/jest": "^29.5.12", - "@types/node": "^20.14.2", + "@types/node": "^20.14.9", "eslint-config-prettier": "^9.1.0", "eslint-config-react-app": "^7.0.1", "eslint-plugin-prettier": "^4.2.1", From 512b490ed5772e382853529b1afa567ca2434bbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:27:43 +0000 Subject: [PATCH 06/81] build(deps): bump github/codeql-action from 3.25.8 to 3.25.11 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.25.8 to 3.25.11. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/2e230e8fe0ad3a14a340ad0815ddb96d599d2aff...b611370bb5703a7efb587f9d136a52ea24c5c38c) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1ea1f5efa..12ffc659c 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Initialize CodeQL - uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 with: languages: ${{ matrix.language }} - name: Autobuild - uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8 + uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 078084888..c82fa87a1 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8 + uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11 with: sarif_file: results.sarif From 35811eda964ee0a4ff68d8a30fdacf566ab7e0c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:28:03 +0000 Subject: [PATCH 07/81] build(deps): bump bufbuild/buf-setup-action from 1.32.2 to 1.34.0 Bumps [bufbuild/buf-setup-action](https://github.com/bufbuild/buf-setup-action) from 1.32.2 to 1.34.0. - [Release notes](https://github.com/bufbuild/buf-setup-action/releases) - [Commits](https://github.com/bufbuild/buf-setup-action/compare/dde0b9351db90fbf78e345f41a57de8514bf1091...35c243d7f2a909b1d4e40399b348a7fdab27d78d) --- updated-dependencies: - dependency-name: bufbuild/buf-setup-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/buf-lint.yml | 2 +- .github/workflows/buf.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index acf91ea12..cbfeb2ba5 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2 + - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index f52d20785..8b964ef24 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2 + - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 From fb1b3d6b1a42b1b5a3aad0c02e83118cb9dd2e72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:43:04 +0000 Subject: [PATCH 08/81] build(deps): bump actions/checkout from 4.1.6 to 4.1.7 in /scripts Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.6 to 4.1.7. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/a5ac7e51b41094c92402da3b24376905380afc29...692973e3d937129bcbf40652eb9f2f61becf3332) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- scripts/golangci-lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index bb65d7f60..83ae3906c 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: From e5c69685b5bb1a9cd19bac4a79470dbc766d509d Mon Sep 17 00:00:00 2001 From: B1F030 Date: Mon, 8 Jul 2024 11:30:36 +0800 Subject: [PATCH 09/81] Fix web.cors.origin in command-line/prometheus.md Signed-off-by: B1F030 --- cmd/prometheus/main.go | 2 +- docs/command-line/prometheus.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 1d844ddba..787a28992 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -343,7 +343,7 @@ func main() { a.Flag("web.page-title", "Document title of Prometheus instance."). Default("Prometheus Time Series Collection and Processing Server").StringVar(&cfg.web.PageTitle) - a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. 
Example: 'https?://(domain1|domain2)\.com'`). + a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com'`). Default(".*").StringVar(&cfg.corsRegexString) serverOnlyFlag(a, "storage.tsdb.path", "Base path for metrics storage."). diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 223260243..2faf65105 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -30,7 +30,7 @@ The Prometheus monitoring server | --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` | | --web.console.libraries | Path to the console library directory. | `console_libraries` | | --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` | -| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` | +| --web.cors.origin | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` | | --storage.tsdb.path | Base path for metrics storage. Use with server mode only. | `data/` | | --storage.tsdb.retention | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | | | --storage.tsdb.retention.time | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | | From 7083ae8267ceead89b27621c165230cb0fd239ec Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 10 Jul 2024 10:00:31 +0100 Subject: [PATCH 10/81] [release 2.53] Revert 13583 to stop dropping samples in remote-write catch-up (#14446) * Revert "fix bug that would cause us to endlessly fall behind (#13583)" This reverts commit 0c71230784368da829f1f02d412d181d7a06aee6. (leaving the new test in place) * TSDB: enhance TestRun_AvoidNotifyWhenBehind With code suggested by @cstyan in #14439. * WAL watcher: add back log line showing current segment --------- Signed-off-by: Bryan Boreham --- tsdb/wlog/watcher.go | 21 ++++---- tsdb/wlog/watcher_test.go | 110 +++++++++++++++++++------------------- 2 files changed, 65 insertions(+), 66 deletions(-) diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index 8ebd9249a..d836e3d86 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -262,6 +262,11 @@ func (w *Watcher) loop() { // Run the watcher, which will tail the WAL until the quit channel is closed // or an error case is hit. func (w *Watcher) Run() error { + _, lastSegment, err := w.firstAndLast() + if err != nil { + return fmt.Errorf("wal.Segments: %w", err) + } + // We want to ensure this is false across iterations since // Run will be called again if there was a failure to read the WAL. 
w.sendSamples = false @@ -286,20 +291,14 @@ func (w *Watcher) Run() error { return err } - level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment) + level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) for !isClosed(w.quit) { w.currentSegmentMetric.Set(float64(currentSegment)) - // Re-check on each iteration in case a new segment was added, - // because watch() will wait for notifications on the last segment. - _, lastSegment, err := w.firstAndLast() - if err != nil { - return fmt.Errorf("wal.Segments: %w", err) - } - tail := currentSegment >= lastSegment - - level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment, "lastSegment", lastSegment) - if err := w.watch(currentSegment, tail); err != nil && !errors.Is(err, ErrIgnorable) { + // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment. + // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment. + level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment) + if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index ff006cb81..b39a0fa8a 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -17,6 +17,7 @@ import ( "math/rand" "os" "path" + "runtime" "sync" "testing" "time" @@ -698,11 +699,46 @@ func TestRun_StartupTime(t *testing.T) { } } +func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error { + enc := record.Encoder{} + for j := 0; j < seriesCount; j++ { + ref := j + (segment * 100) + series := enc.Series([]record.RefSeries{ + { + Ref: chunks.HeadSeriesRef(ref), + Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)), + }, + }, nil) + if err := w.Log(series); err != nil { + return err + } + + for k := 0; k < samplesCount; k++ { + inner := rand.Intn(ref + 1) + sample := enc.Samples([]record.RefSample{ + { + Ref: chunks.HeadSeriesRef(inner), + T: int64(segment), + V: float64(segment), + }, + }, nil) + if err := w.Log(sample); err != nil { + return err + } + } + } + return nil +} + func TestRun_AvoidNotifyWhenBehind(t *testing.T) { - const pageSize = 32 * 1024 - const segments = 10 - const seriesCount = 20 - const samplesCount = 300 + if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms. + t.SkipNow() + } + const segmentSize = pageSize // Smallest allowed segment size. + const segmentsToWrite = 5 + const segmentsToRead = segmentsToWrite - 1 + const seriesCount = 10 + const samplesCount = 50 // This test can take longer than intended to finish in cloud CI. 
readTimeout := 10 * time.Second @@ -715,73 +751,37 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) { err := os.Mkdir(wdir, 0o777) require.NoError(t, err) - enc := record.Encoder{} - w, err := NewSize(nil, nil, wdir, pageSize, compress) + w, err := NewSize(nil, nil, wdir, segmentSize, compress) require.NoError(t, err) var wg sync.WaitGroup - // add one segment initially to ensure there's a value > 0 for the last segment id - for i := 0; i < 1; i++ { - for j := 0; j < seriesCount; j++ { - ref := j + (i * 100) - series := enc.Series([]record.RefSeries{ - { - Ref: chunks.HeadSeriesRef(ref), - Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)), - }, - }, nil) - require.NoError(t, w.Log(series)) - - for k := 0; k < samplesCount; k++ { - inner := rand.Intn(ref + 1) - sample := enc.Samples([]record.RefSample{ - { - Ref: chunks.HeadSeriesRef(inner), - T: int64(i), - V: float64(i), - }, - }, nil) - require.NoError(t, w.Log(sample)) - } - } - } + // Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk. + require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount)) + w.NextSegment() // Force creation of the next segment wg.Add(1) go func() { defer wg.Done() - for i := 1; i < segments; i++ { - for j := 0; j < seriesCount; j++ { - ref := j + (i * 100) - series := enc.Series([]record.RefSeries{ - { - Ref: chunks.HeadSeriesRef(ref), - Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)), - }, - }, nil) - require.NoError(t, w.Log(series)) - - for k := 0; k < samplesCount; k++ { - inner := rand.Intn(ref + 1) - sample := enc.Samples([]record.RefSample{ - { - Ref: chunks.HeadSeriesRef(inner), - T: int64(i), - V: float64(i), - }, - }, nil) - require.NoError(t, w.Log(sample)) - } - } + for i := 1; i < segmentsToWrite; i++ { + require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount)) + w.NextSegment() } }() wt := newWriteToMock(time.Millisecond) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false) - watcher.MaxSegment = segments + watcher.MaxSegment = segmentsToRead watcher.setMetrics() startTime := time.Now() err = watcher.Run() wg.Wait() require.Less(t, time.Since(startTime), readTimeout) + + // But samples records shouldn't get dropped + retry(t, defaultRetryInterval, defaultRetries, func() bool { + return wt.checkNumSeries() > 0 + }) + require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended) + require.NoError(t, err) require.NoError(t, w.Close()) }) From 14cfec3f6048b735e08c1e9c64c8d4211d32bab4 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 10 Jul 2024 11:14:07 +0100 Subject: [PATCH 11/81] Prepare release 2.53.1 (#14452) Co-authored-by: George Krajcsovits Signed-off-by: Bryan Boreham --- CHANGELOG.md | 8 ++++++++ VERSION | 2 +- web/ui/module/codemirror-promql/package.json | 4 ++-- web/ui/module/lezer-promql/package.json | 2 +- web/ui/package-lock.json | 14 +++++++------- web/ui/package.json | 2 +- web/ui/react-app/package.json | 4 ++-- 7 files changed, 22 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7e4799993..d5a91e900 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,14 @@ ## unreleased +## 2.53.1 / 2024-07-10 + +Fix a bug which would drop samples in remote-write if the sending flow stalled +for longer than it takes to write one "WAL segment". How long this takes depends on the size +of your Prometheus; as a rough guide with 10 million series it is about 2-3 minutes. 
+ +* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446 + ## 2.53.0 / 2024-06-16 This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75. diff --git a/VERSION b/VERSION index 261d95596..f419e2c6f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.53.0 +2.53.1 diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 519c33365..c9efe3491 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.0", + "version": "0.53.1", "description": "a CodeMirror mode for the PromQL language", "types": "dist/esm/index.d.ts", "module": "dist/esm/index.js", @@ -29,7 +29,7 @@ }, "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.0", + "@prometheus-io/lezer-promql": "0.53.1", "lru-cache": "^7.18.3" }, "devDependencies": { diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json index 5a3b0055b..b6147ff11 100644 --- a/web/ui/module/lezer-promql/package.json +++ b/web/ui/module/lezer-promql/package.json @@ -1,6 +1,6 @@ { "name": "@prometheus-io/lezer-promql", - "version": "0.53.0", + "version": "0.53.1", "description": "lezer-based PromQL grammar", "main": "dist/index.cjs", "type": "module", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index c8135d5e2..8a473e327 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -1,12 +1,12 @@ { "name": "prometheus-io", - "version": "0.53.0", + "version": "0.53.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "prometheus-io", - "version": "0.53.0", + "version": "0.53.1", "workspaces": [ "react-app", "module/*" @@ -30,10 +30,10 @@ }, "module/codemirror-promql": { "name": "@prometheus-io/codemirror-promql", - "version": "0.53.0", + "version": "0.53.1", "license": "Apache-2.0", "dependencies": { - "@prometheus-io/lezer-promql": "0.53.0", + "@prometheus-io/lezer-promql": "0.53.1", "lru-cache": "^7.18.3" }, "devDependencies": { @@ -69,7 +69,7 @@ }, "module/lezer-promql": { "name": "@prometheus-io/lezer-promql", - "version": "0.53.0", + "version": "0.53.1", "license": "Apache-2.0", "devDependencies": { "@lezer/generator": "^1.5.1", @@ -19233,7 +19233,7 @@ }, "react-app": { "name": "@prometheus-io/app", - "version": "0.53.0", + "version": "0.53.1", "dependencies": { "@codemirror/autocomplete": "^6.11.1", "@codemirror/commands": "^6.3.2", @@ -19251,7 +19251,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.0", + "@prometheus-io/codemirror-promql": "0.53.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", diff --git a/web/ui/package.json b/web/ui/package.json index 8b924737d..d59e294e2 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -28,5 +28,5 @@ "ts-jest": "^29.1.1", "typescript": "^4.9.5" }, - "version": "0.53.0" + "version": "0.53.1" } diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 7d9518e8d..90381cba5 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -1,6 +1,6 @@ { "name": 
"@prometheus-io/app", - "version": "0.53.0", + "version": "0.53.1", "private": true, "dependencies": { "@codemirror/autocomplete": "^6.11.1", @@ -19,7 +19,7 @@ "@lezer/lr": "^1.3.14", "@nexucis/fuzzy": "^0.4.1", "@nexucis/kvsearch": "^0.8.1", - "@prometheus-io/codemirror-promql": "0.53.0", + "@prometheus-io/codemirror-promql": "0.53.1", "bootstrap": "^4.6.2", "css.escape": "^1.5.1", "downshift": "^7.6.2", From 8c8860d2d6a2461762c0e83727136fed915df2b5 Mon Sep 17 00:00:00 2001 From: darshanime Date: Tue, 30 Jan 2024 13:07:49 +0530 Subject: [PATCH 12/81] Allow number literals as duration Signed-off-by: darshanime --- promql/parser/generated_parser.y | 4 + promql/parser/generated_parser.y.go | 451 ++++++++++---------- promql/parser/lex.go | 20 +- promql/parser/parse.go | 4 + promql/parser/parse_test.go | 51 ++- promql/promqltest/testdata/at_modifier.test | 37 ++ promql/promqltest/testdata/functions.test | 5 + promql/promqltest/testdata/staleness.test | 3 + promql/promqltest/testdata/subquery.test | 15 + 9 files changed, 343 insertions(+), 247 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index d84acc37c..0de6b13e7 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -905,6 +905,10 @@ duration : DURATION yylex.(*parser).addParseErr($1.PositionRange(), err) } } + | number + { + $$ = yylex.(*parser).parseNumberLiteral($1); + } ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 07899c0a0..6d610bcde 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -324,209 +324,209 @@ var yyExca = [...]int16{ 78, 187, 84, 187, -2, 123, - -1, 199, - 12, 237, - 13, 237, - 18, 237, - 19, 237, - 25, 237, - 40, 237, - 46, 237, - 47, 237, - 50, 237, - 56, 237, - 61, 237, - 62, 237, - 63, 237, - 64, 237, - 65, 237, - 66, 237, - 67, 237, - 68, 237, - 69, 237, - 70, 237, - 71, 237, - 72, 237, - 73, 237, - 74, 237, - 78, 237, - 82, 237, - 84, 237, - 87, 237, - 88, 237, - -2, 0, -1, 200, - 12, 237, - 13, 237, - 18, 237, - 19, 237, - 25, 237, - 40, 237, - 46, 237, - 47, 237, - 50, 237, - 56, 237, - 61, 237, - 62, 237, - 63, 237, - 64, 237, - 65, 237, - 66, 237, - 67, 237, - 68, 237, - 69, 237, - 70, 237, - 71, 237, - 72, 237, - 73, 237, - 74, 237, - 78, 237, - 82, 237, - 84, 237, - 87, 237, - 88, 237, + 12, 238, + 13, 238, + 18, 238, + 19, 238, + 25, 238, + 40, 238, + 46, 238, + 47, 238, + 50, 238, + 56, 238, + 61, 238, + 62, 238, + 63, 238, + 64, 238, + 65, 238, + 66, 238, + 67, 238, + 68, 238, + 69, 238, + 70, 238, + 71, 238, + 72, 238, + 73, 238, + 74, 238, + 78, 238, + 82, 238, + 84, 238, + 87, 238, + 88, 238, -2, 0, - -1, 221, - 21, 235, + -1, 201, + 12, 238, + 13, 238, + 18, 238, + 19, 238, + 25, 238, + 40, 238, + 46, 238, + 47, 238, + 50, 238, + 56, 238, + 61, 238, + 62, 238, + 63, 238, + 64, 238, + 65, 238, + 66, 238, + 67, 238, + 68, 238, + 69, 238, + 70, 238, + 71, 238, + 72, 238, + 73, 238, + 74, 238, + 78, 238, + 82, 238, + 84, 238, + 87, 238, + 88, 238, -2, 0, - -1, 292, + -1, 222, 21, 236, -2, 0, + -1, 293, + 21, 237, + -2, 0, } const yyPrivate = 57344 -const yyLast = 793 +const yyLast = 794 var yyAct = [...]int16{ - 155, 330, 328, 274, 335, 152, 225, 39, 191, 148, - 288, 287, 156, 117, 81, 177, 227, 106, 105, 6, - 154, 108, 107, 197, 132, 198, 237, 109, 199, 200, - 159, 59, 243, 325, 324, 110, 321, 159, 189, 268, - 348, 301, 265, 127, 159, 192, 349, 264, 290, 195, - 176, 160, 159, 269, 308, 175, 319, 195, 160, 347, - 239, 240, 346, 112, 241, 
113, 299, 161, 174, 270, - 263, 111, 254, 160, 161, 228, 230, 232, 233, 234, - 242, 244, 247, 248, 249, 250, 251, 255, 256, 161, - 114, 229, 231, 235, 236, 238, 245, 246, 108, 266, - 258, 252, 253, 329, 109, 157, 158, 159, 2, 3, - 4, 5, 307, 160, 162, 257, 262, 299, 172, 166, - 169, 217, 104, 164, 110, 165, 150, 306, 193, 161, - 178, 104, 179, 151, 305, 183, 196, 179, 185, 261, - 194, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 128, 227, 88, 216, - 120, 218, 219, 100, 336, 103, 168, 237, 97, 98, - 118, 181, 100, 243, 103, 87, 181, 224, 259, 167, - 149, 180, 182, 121, 187, 76, 180, 182, 120, 260, - 102, 35, 124, 7, 10, 296, 151, 123, 118, 102, - 295, 239, 240, 267, 78, 241, 116, 186, 285, 286, - 122, 121, 289, 254, 318, 294, 228, 230, 232, 233, - 234, 242, 244, 247, 248, 249, 250, 251, 255, 256, - 317, 292, 229, 231, 235, 236, 238, 245, 246, 316, - 315, 314, 252, 253, 133, 134, 135, 136, 137, 138, - 139, 140, 141, 142, 143, 144, 145, 146, 147, 313, - 312, 311, 310, 309, 320, 293, 297, 298, 300, 273, - 302, 222, 151, 8, 85, 221, 272, 37, 303, 304, - 276, 277, 275, 282, 284, 281, 283, 278, 279, 280, - 220, 163, 126, 50, 125, 36, 1, 291, 151, 77, - 83, 49, 322, 323, 48, 83, 47, 104, 46, 327, - 82, 131, 332, 333, 334, 82, 331, 45, 184, 338, - 337, 340, 339, 80, 44, 43, 341, 342, 129, 53, - 76, 343, 55, 86, 88, 22, 54, 345, 170, 171, - 42, 130, 56, 41, 97, 98, 40, 350, 100, 101, - 103, 87, 58, 51, 190, 9, 9, 74, 344, 271, - 84, 188, 223, 18, 19, 79, 119, 20, 153, 57, - 226, 52, 115, 75, 0, 102, 0, 0, 60, 61, + 152, 331, 329, 275, 336, 154, 226, 39, 192, 148, + 289, 288, 158, 117, 81, 178, 228, 106, 105, 6, + 156, 108, 198, 132, 199, 153, 238, 109, 200, 201, + 300, 107, 244, 120, 326, 325, 322, 153, 190, 59, + 320, 302, 266, 118, 110, 193, 161, 349, 297, 196, + 127, 260, 153, 296, 291, 259, 121, 153, 161, 350, + 240, 241, 162, 112, 242, 113, 348, 330, 295, 347, + 258, 111, 255, 161, 162, 229, 231, 233, 234, 235, + 243, 245, 248, 249, 250, 251, 252, 256, 257, 162, + 153, 230, 232, 236, 237, 239, 246, 247, 114, 300, + 308, 253, 254, 309, 157, 159, 160, 108, 2, 3, + 4, 5, 223, 109, 163, 307, 222, 150, 173, 167, + 170, 196, 306, 165, 151, 166, 104, 151, 194, 218, + 337, 221, 110, 76, 153, 184, 197, 153, 186, 104, + 195, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 269, 128, 228, 217, + 35, 267, 219, 220, 265, 86, 88, 100, 238, 103, + 270, 149, 263, 188, 244, 161, 97, 98, 225, 83, + 100, 101, 103, 87, 7, 319, 271, 264, 169, 82, + 261, 162, 321, 177, 102, 262, 187, 185, 176, 318, + 317, 168, 240, 241, 268, 10, 242, 102, 316, 286, + 287, 175, 315, 290, 255, 78, 314, 229, 231, 233, + 234, 235, 243, 245, 248, 249, 250, 251, 252, 256, + 257, 313, 293, 230, 232, 236, 237, 239, 246, 247, + 179, 312, 180, 253, 254, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 120, 311, 310, 126, 85, 125, 83, 298, 299, 301, + 118, 303, 124, 36, 1, 292, 82, 123, 116, 304, + 305, 182, 274, 121, 80, 49, 48, 47, 46, 273, + 122, 181, 183, 277, 278, 276, 283, 285, 282, 284, + 279, 280, 281, 323, 324, 180, 131, 8, 104, 294, + 328, 37, 157, 333, 334, 335, 151, 332, 50, 45, + 339, 338, 341, 340, 77, 44, 153, 342, 343, 43, + 53, 76, 344, 55, 129, 88, 22, 54, 346, 171, + 172, 42, 130, 56, 182, 97, 98, 41, 351, 100, + 164, 103, 87, 40, 181, 183, 58, 151, 74, 9, + 9, 51, 191, 345, 18, 19, 272, 153, 20, 84, + 189, 224, 79, 119, 75, 155, 102, 57, 227, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 52, 115, 0, 13, 0, 0, 0, + 24, 0, 
30, 0, 0, 31, 32, 38, 0, 53, + 76, 0, 55, 327, 0, 22, 54, 0, 0, 0, + 0, 0, 56, 0, 277, 278, 276, 283, 285, 282, + 284, 279, 280, 281, 0, 0, 0, 74, 0, 0, + 0, 0, 0, 18, 19, 0, 0, 20, 0, 0, + 0, 0, 0, 75, 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 0, 0, 0, 13, 0, 0, 0, 24, - 0, 30, 0, 0, 31, 32, 38, 0, 53, 76, - 0, 55, 326, 0, 22, 54, 0, 0, 0, 0, - 0, 56, 0, 276, 277, 275, 282, 284, 281, 283, - 278, 279, 280, 0, 0, 0, 74, 0, 0, 0, - 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, - 0, 0, 75, 0, 0, 0, 0, 60, 61, 62, - 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, - 73, 0, 0, 0, 13, 0, 0, 0, 24, 0, - 30, 0, 0, 31, 32, 53, 76, 0, 55, 0, - 0, 22, 54, 0, 0, 0, 0, 0, 56, 0, + 0, 30, 0, 0, 31, 32, 53, 76, 0, 55, + 0, 0, 22, 54, 0, 0, 0, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 74, 0, 17, 76, 0, 0, 18, - 19, 22, 0, 20, 0, 0, 0, 0, 0, 75, - 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 0, 18, - 19, 13, 0, 20, 0, 24, 0, 30, 0, 0, - 31, 32, 0, 0, 11, 12, 14, 15, 16, 21, - 23, 25, 26, 27, 28, 29, 33, 34, 17, 35, - 0, 13, 0, 0, 22, 24, 0, 30, 0, 0, - 31, 32, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 74, 0, 17, 76, 0, 0, + 18, 19, 22, 0, 20, 0, 0, 0, 0, 0, + 75, 0, 0, 0, 0, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 0, + 18, 19, 13, 0, 20, 0, 24, 0, 30, 0, + 0, 31, 32, 0, 0, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 28, 29, 33, 34, 17, + 35, 0, 13, 0, 0, 22, 24, 0, 30, 0, + 0, 31, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 11, 12, 14, - 15, 16, 21, 23, 25, 26, 27, 28, 29, 33, - 34, 104, 0, 0, 13, 0, 0, 0, 24, 173, - 30, 0, 0, 31, 32, 0, 0, 0, 0, 0, - 104, 0, 0, 0, 0, 0, 0, 86, 88, 89, + 0, 0, 0, 18, 19, 0, 0, 20, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, + 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, + 33, 34, 104, 0, 0, 13, 0, 0, 0, 24, + 174, 30, 0, 0, 31, 32, 0, 0, 0, 0, + 0, 104, 0, 0, 0, 0, 0, 0, 86, 88, + 89, 0, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 0, 100, 101, 103, 87, 86, 88, 89, 0, 90, 91, 92, 93, 94, 95, 96, 97, 98, - 99, 0, 100, 101, 103, 87, 86, 88, 89, 0, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - 0, 100, 101, 103, 87, 104, 0, 0, 0, 102, + 99, 0, 100, 101, 103, 87, 104, 0, 0, 0, + 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 104, 0, 0, 0, 102, + 0, 0, 86, 88, 89, 0, 90, 91, 92, 0, + 94, 95, 96, 97, 98, 99, 0, 100, 101, 103, + 87, 86, 88, 89, 0, 90, 91, 0, 0, 94, + 95, 0, 97, 98, 99, 0, 100, 101, 103, 87, + 0, 0, 0, 0, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 104, 0, 0, 0, 102, 0, - 0, 86, 88, 89, 0, 90, 91, 92, 0, 94, - 95, 96, 97, 98, 99, 0, 100, 101, 103, 87, - 86, 88, 89, 0, 90, 91, 0, 0, 94, 95, - 0, 97, 98, 99, 0, 100, 101, 103, 87, 0, - 0, 0, 0, 102, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 102, + 0, 0, 0, 102, } var yyPact = [...]int16{ - 17, 183, 566, 566, 396, 503, -1000, -1000, -1000, 178, + 17, 174, 567, 567, 397, 504, -1000, -1000, -1000, 147, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 303, -1000, 272, -1000, 646, + -1000, -1000, -1000, -1000, -1000, 264, -1000, 262, -1000, 647, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 20, 109, -1000, 473, -1000, 473, 172, -1000, + -1000, -1000, 29, 117, -1000, 474, -1000, 474, 120, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 186, -1000, 
-1000, 190, - -1000, -1000, 290, -1000, 19, -1000, -53, -53, -53, -53, - -53, -53, -53, -53, -53, -53, -53, -53, -53, -53, - -53, -53, 124, 18, 289, 109, -57, -1000, 164, 164, - 317, -1000, 627, 108, -1000, 48, -1000, -1000, 128, 133, - -1000, -1000, -1000, 298, -1000, 182, -1000, 33, 473, -1000, - -58, -51, -1000, 473, 473, 473, 473, 473, 473, 473, - 473, 473, 473, 473, 473, 473, 473, 473, -1000, 187, - -1000, -1000, -1000, 106, -1000, -1000, -1000, -1000, -1000, -1000, - 88, 88, 269, -1000, -1000, -1000, -1000, 155, -1000, -1000, - 93, -1000, 646, -1000, -1000, 158, -1000, 114, -1000, -1000, - -1000, -1000, -1000, 45, -1000, -1000, -1000, -1000, -1000, 16, - 73, 13, -1000, -1000, -1000, 252, 117, 164, 164, 164, - 164, 108, 108, 293, 293, 293, 710, 691, 293, 293, - 710, 108, 108, 293, 108, 117, -1000, 26, -1000, -1000, - -1000, 263, -1000, 193, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 258, -1000, -1000, 270, + -1000, -1000, 261, -1000, 26, -1000, -54, -54, -54, -54, + -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, + -54, -54, 115, 18, 348, 117, -57, -1000, 186, 186, + 318, -1000, 628, 112, -1000, 191, -1000, -1000, 238, 301, + -1000, -1000, -1000, 177, -1000, 171, -1000, 33, 474, -1000, + -59, -51, -1000, 474, 474, 474, 474, 474, 474, 474, + 474, 474, 474, 474, 474, 474, 474, 474, -1000, 118, + -1000, -1000, -1000, -1000, -1000, 114, -1000, -1000, -1000, -1000, + -1000, 71, 71, 110, -1000, -1000, -1000, -1000, 156, -1000, + -1000, 48, -1000, 647, -1000, -1000, 31, -1000, 170, -1000, + -1000, -1000, -1000, -1000, 162, -1000, -1000, -1000, -1000, -1000, + 16, 135, 130, -1000, -1000, -1000, 265, 294, 186, 186, + 186, 186, 112, 112, 125, 125, 125, 711, 692, 125, + 125, 711, 112, 112, 125, 112, 294, -1000, 32, -1000, + -1000, -1000, 307, -1000, 46, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 473, -1000, - -1000, -1000, -1000, -1000, -1000, 98, 98, 15, 98, 41, - 41, 110, 37, -1000, -1000, 257, 256, 255, 254, 253, - 235, 234, 233, 224, 208, -1000, -1000, -1000, -1000, -1000, - -1000, 35, 262, -1000, -1000, 14, -1000, 646, -1000, -1000, - -1000, 98, -1000, 8, 7, 395, -1000, -1000, -1000, 47, - 11, 88, 88, 88, 150, 150, 47, 150, 47, -1000, - -1000, -1000, -1000, -1000, 98, 98, -1000, -1000, -1000, 98, - -1000, -1000, -1000, -1000, -1000, -1000, 88, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 38, -1000, 25, -1000, -1000, -1000, - -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 474, + -1000, -1000, -1000, -1000, -1000, -1000, 80, 80, 15, 80, + 105, 105, 98, 86, -1000, -1000, 256, 255, 235, 225, + 210, 206, 202, 194, 193, 179, -1000, -1000, -1000, -1000, + -1000, -1000, 19, 190, -1000, -1000, 14, -1000, 647, -1000, + -1000, -1000, 80, -1000, 9, 8, 396, -1000, -1000, -1000, + 11, 6, 71, 71, 71, 116, 116, 11, 116, 11, + -1000, -1000, -1000, -1000, -1000, 80, 80, -1000, -1000, -1000, + 80, -1000, -1000, -1000, -1000, -1000, -1000, 71, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 45, -1000, 38, -1000, -1000, + -1000, -1000, } var yyPgo = [...]int16{ - 0, 372, 13, 371, 6, 15, 370, 352, 369, 368, - 366, 194, 273, 365, 14, 362, 10, 11, 361, 360, - 8, 359, 3, 4, 358, 2, 1, 0, 354, 12, - 5, 353, 346, 18, 156, 343, 341, 7, 340, 338, - 17, 328, 31, 325, 324, 317, 311, 308, 306, 304, - 301, 293, 9, 297, 296, 295, + 0, 394, 13, 393, 6, 15, 378, 
356, 377, 375, + 373, 205, 307, 372, 14, 371, 10, 11, 370, 369, + 8, 366, 3, 4, 363, 2, 1, 0, 362, 12, + 5, 361, 353, 18, 157, 347, 342, 7, 341, 339, + 17, 334, 39, 329, 325, 319, 306, 288, 287, 286, + 285, 318, 9, 275, 274, 273, } var yyR1 = [...]int8{ @@ -553,7 +553,7 @@ var yyR1 = [...]int8{ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, 44, 27, 29, 29, 30, 30, 26, - 25, 25, 52, 48, 10, 53, 53, 17, 17, + 25, 25, 52, 52, 48, 10, 53, 53, 17, 17, } var yyR2 = [...]int8{ @@ -580,7 +580,7 @@ var yyR2 = [...]int8{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, - 2, 1, 1, 1, 1, 0, 1, 0, 1, + 2, 1, 1, 1, 1, 1, 0, 1, 0, 1, } var yyChk = [...]int16{ @@ -599,27 +599,27 @@ var yyChk = [...]int16{ 2, 25, 20, 7, 2, 4, 2, 24, -34, -41, -36, -46, 77, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -52, 56, - 2, 9, -30, -9, 2, -27, -29, 87, 88, 19, - 40, 56, -52, 2, -40, -33, -16, 15, 2, -16, - -39, 22, -37, 22, 20, 7, 2, -5, 2, 4, - 53, 43, 54, -5, 20, -14, 25, 2, -18, 5, - -28, -20, 12, -27, -29, 16, -37, 81, 83, 79, - 80, -37, -37, -37, -37, -37, -37, -37, -37, -37, - -37, -37, -37, -37, -37, -37, -52, 15, -27, -27, - 21, 6, 2, -15, 22, -4, -6, 2, 61, 77, - 62, 78, 63, 64, 65, 79, 80, 12, 81, 46, - 47, 50, 66, 18, 67, 82, 83, 68, 69, 70, - 71, 72, 87, 88, 58, 73, 74, 22, 7, 20, - -2, 25, 2, 25, 2, 26, 26, -29, 26, 40, - 56, -21, 24, 17, -22, 30, 28, 29, 35, 36, - 37, 33, 31, 34, 32, -16, -16, -17, -16, -17, - 22, -53, -52, 2, 22, 7, 2, -37, -26, 19, - -26, 26, -26, -20, -20, 24, 17, 2, 17, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 21, - 2, 22, -4, -26, 26, 26, 17, -22, -25, 56, - -26, -30, -27, -27, -27, -23, 14, -23, -25, -23, - -25, -26, -26, -26, -24, -27, 24, 21, 2, 21, - -27, + 2, 9, -27, 19, -30, -9, 2, -27, -29, 87, + 88, 40, 56, -52, 2, -40, -33, -16, 15, 2, + -16, -39, 22, -37, 22, 20, 7, 2, -5, 2, + 4, 53, 43, 54, -5, 20, -14, 25, 2, -18, + 5, -28, -20, 12, -27, -29, 16, -37, 81, 83, + 79, 80, -37, -37, -37, -37, -37, -37, -37, -37, + -37, -37, -37, -37, -37, -37, -37, -52, 15, -27, + -27, 21, 6, 2, -15, 22, -4, -6, 2, 61, + 77, 62, 78, 63, 64, 65, 79, 80, 12, 81, + 46, 47, 50, 66, 18, 67, 82, 83, 68, 69, + 70, 71, 72, 87, 88, 58, 73, 74, 22, 7, + 20, -2, 25, 2, 25, 2, 26, 26, -29, 26, + 40, 56, -21, 24, 17, -22, 30, 28, 29, 35, + 36, 37, 33, 31, 34, 32, -16, -16, -17, -16, + -17, 22, -53, -52, 2, 22, 7, 2, -37, -26, + 19, -26, 26, -26, -20, -20, 24, 17, 2, 17, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 21, 2, 22, -4, -26, 26, 26, 17, -22, -25, + 56, -26, -30, -27, -27, -27, -23, 14, -23, -25, + -23, -25, -26, -26, -26, -24, -27, 24, 21, 2, + 21, -27, } var yyDef = [...]int16{ @@ -628,37 +628,37 @@ var yyDef = [...]int16{ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 106, 223, 0, 233, 0, 83, 84, + 18, 19, 0, 106, 223, 0, 234, 0, 83, 84, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 217, 218, 0, 5, 98, 0, 126, 129, 0, 134, 135, 139, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, 0, 0, 60, 0, 81, 82, 0, 87, 89, 0, 93, - 97, 234, 124, 0, 130, 0, 133, 138, 0, 42, + 97, 235, 124, 0, 130, 0, 133, 138, 0, 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, 0, - 69, 232, 70, 0, 72, 227, 228, 73, 74, 224, - 0, 0, 0, 80, 20, 21, 24, 0, 54, 25, - 0, 62, 64, 66, 85, 0, 90, 0, 96, 219, - 220, 221, 222, 0, 125, 128, 131, 132, 137, 140, - 142, 145, 149, 150, 151, 0, 26, 0, 0, 
-2, - -2, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 68, 0, 225, 226, - 75, -2, 79, 0, 53, 56, 58, 59, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 61, 65, 86, - 88, 91, 95, 92, 94, 0, 0, 0, 0, 0, - 0, 0, 0, 155, 157, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 45, 46, 49, 238, 50, - 71, 0, -2, 78, 51, 0, 57, 63, 141, 229, - 143, 0, 146, 0, 0, 0, 153, 158, 154, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, - 77, 52, 55, 144, 0, 0, 152, 156, 159, 0, - 231, 160, 161, 162, 163, 164, 0, 165, 166, 167, - 168, 147, 148, 230, 0, 172, 0, 170, 173, 169, - 171, + 69, 232, 233, 224, 70, 0, 72, 227, 228, 73, + 74, 0, 0, 0, 80, 20, 21, 24, 0, 54, + 25, 0, 62, 64, 66, 85, 0, 90, 0, 96, + 219, 220, 221, 222, 0, 125, 128, 131, 132, 137, + 140, 142, 145, 149, 150, 151, 0, 26, 0, 0, + -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 68, 0, 225, + 226, 75, -2, 79, 0, 53, 56, 58, 59, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 61, 65, + 86, 88, 91, 95, 92, 94, 0, 0, 0, 0, + 0, 0, 0, 0, 155, 157, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 45, 46, 49, 239, + 50, 71, 0, -2, 78, 51, 0, 57, 63, 141, + 229, 143, 0, 146, 0, 0, 0, 153, 158, 154, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 76, 77, 52, 55, 144, 0, 0, 152, 156, 159, + 0, 231, 160, 161, 162, 163, 164, 0, 165, 166, + 167, 168, 147, 148, 230, 0, 172, 0, 170, 173, + 169, 171, } var yyTok1 = [...]int8{ @@ -1866,6 +1866,11 @@ yydefault: } } case 233: + yyDollar = yyS[yypt-1 : yypt+1] + { + yyVAL.duration = yylex.(*parser).parseNumberLiteral(yyDollar[1].float) + } + case 234: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1873,7 +1878,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 234: + case 235: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1882,12 +1887,12 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 235: + case 236: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.duration = 0 } - case 237: + case 238: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 8c7fbb89b..0e06ca525 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -478,7 +478,7 @@ func lexStatements(l *Lexer) stateFn { skipSpaces(l) } l.bracketOpen = true - return lexDuration + return lexNumberOrDuration case r == ']': if !l.bracketOpen { return l.errorf("unexpected right bracket %q", r) @@ -846,18 +846,6 @@ func lexLineComment(l *Lexer) stateFn { return lexStatements } -func lexDuration(l *Lexer) stateFn { - if l.scanNumber() { - return l.errorf("missing unit character in duration") - } - if !acceptRemainingDuration(l) { - return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos]) - } - l.backup() - l.emit(DURATION) - return lexStatements -} - // lexNumber scans a number: decimal, hex, oct or float. func lexNumber(l *Lexer) stateFn { if !l.scanNumber() { @@ -909,6 +897,7 @@ func acceptRemainingDuration(l *Lexer) bool { // scanNumber scans numbers of different formats. The scanned Item is // not necessarily a valid number. This case is caught by the parser. func (l *Lexer) scanNumber() bool { + initialPos := l.pos // Modify the digit pattern if the number is hexadecimal. digitPattern := "0123456789" // Disallow hexadecimal in series descriptions as the syntax is ambiguous. 
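The scanNumber changes work in two parts: the hunk above records initialPos where the token starts, and the next hunk adds a pos == initialPos check that rejects an empty token outright. Without that guard, an input like `foo[]` would fall through to the duration-suffix logic and produce the old "missing unit character in duration" error; with it, the parser can report `bad number or duration syntax: ""` instead, matching the updated expectations in parse_test.go. A minimal self-contained sketch of the guard, using illustrative names rather than the real Lexer API:

	package main

	import (
		"fmt"
		"strings"
	)

	// scanNumberLike mimics the relevant part of scanNumber: consume a run
	// of digits, then fail if nothing at all was consumed. The
	// pos == initialPos comparison is the guard this patch introduces.
	func scanNumberLike(input string) (token string, ok bool) {
		initialPos := 0
		pos := initialPos
		for pos < len(input) && strings.IndexByte("0123456789", input[pos]) >= 0 {
			pos++
		}
		if pos == initialPos {
			return "", false // an empty string is not a valid number
		}
		return input[initialPos:pos], true
	}

	func main() {
		// "" models the empty range contents of foo[].
		for _, in := range []string{"5", "300", ""} {
			tok, ok := scanNumberLike(in)
			fmt.Printf("%q -> token=%q ok=%v\n", in, tok, ok)
		}
	}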
@@ -980,7 +969,10 @@ func (l *Lexer) scanNumber() bool { // Handle digits at the end since we already consumed before this loop. l.acceptRun(digitPattern) } - + // empty string is not a valid number + if l.pos == initialPos { + return false + } // Next thing must not be alphanumeric unless it's the times token // for series repetitions. if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) { diff --git a/promql/parser/parse.go b/promql/parser/parse.go index 6f73e2427..c614c7ad6 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -849,6 +849,10 @@ func parseDuration(ds string) (time.Duration, error) { return time.Duration(dur), nil } +func (p *parser) parseNumberLiteral(ts float64) time.Duration { + return time.Duration(ts * float64(time.Second)) +} + // parseGenerated invokes the yacc generated parser. // The generated parser gets the provided startSymbol injected into // the lexer stream, based on which grammar will be used. diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index a4fe28e5b..8f9a116b9 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2133,6 +2133,25 @@ var testExpr = []struct { EndPos: 25, }, }, + { + input: `test{a="b"}[5m] OFFSET 3600`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "test", + OriginalOffset: 1 * time.Hour, + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, "a", "b"), + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 11, + }, + }, + Range: 5 * time.Minute, + EndPos: 27, + }, + }, { input: `test{a="b"}[5y] @ 1603774699`, expected: &MatrixSelector{ @@ -2152,15 +2171,32 @@ var testExpr = []struct { EndPos: 28, }, }, + { + input: "test[5]", + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "test", + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 4, + }, + }, + Range: 5 * time.Second, + EndPos: 7, + }, + }, { input: `foo[5mm]`, fail: true, - errMsg: "bad duration syntax: \"5mm\"", + errMsg: "bad number or duration syntax: \"5mm\"", }, { input: `foo[5m1]`, fail: true, - errMsg: "bad duration syntax: \"5m1\"", + errMsg: "bad number or duration syntax: \"5m1\"", }, { input: `foo[5m:1m1]`, @@ -2194,17 +2230,12 @@ var testExpr = []struct { { input: `foo[]`, fail: true, - errMsg: "missing unit character in duration", + errMsg: "bad number or duration syntax: \"\"", }, { - input: `foo[1]`, + input: `foo[-1]`, fail: true, - errMsg: "missing unit character in duration", - }, - { - input: `some_metric[5m] OFFSET 1`, - fail: true, - errMsg: "unexpected number \"1\" in offset, expected duration", + errMsg: "bad number or duration syntax: \"\"", }, { input: `some_metric[5m] OFFSET 1mm`, diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test index 3ba6afc49..e11640ae0 100644 --- a/promql/promqltest/testdata/at_modifier.test +++ b/promql/promqltest/testdata/at_modifier.test @@ -14,18 +14,42 @@ eval instant at 10s metric @ 100 offset 50s metric{job="1"} 5 metric{job="2"} 10 +eval instant at 10s metric @ 100 offset 50 + metric{job="1"} 5 + metric{job="2"} 10 + eval instant at 10s metric offset 50s @ 100 metric{job="1"} 5 metric{job="2"} 10 +eval instant at 10s metric offset 50 @ 100 + metric{job="1"} 5 + metric{job="2"} 10 + eval instant at 10s metric @ 0 offset -50s metric{job="1"} 5 
metric{job="2"} 10 +eval instant at 10s metric @ 0 offset -50 + metric{job="1"} 5 + metric{job="2"} 10 + eval instant at 10s metric offset -50s @ 0 metric{job="1"} 5 metric{job="2"} 10 +eval instant at 10s metric offset -50 @ 0 + metric{job="1"} 5 + metric{job="2"} 10 + +eval instant at 10s metric @ 0 offset -50s + metric{job="1"} 5 + metric{job="2"} 10 + +eval instant at 10s metric @ 0 offset -50 + metric{job="1"} 5 + metric{job="2"} 10 + eval instant at 10s -metric @ 100 {job="1"} -10 {job="2"} -20 @@ -48,6 +72,12 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100 offset 50s) eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100) {job="1"} 15 +eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100 offset 50) + {job="1"} 15 + +eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100) + {job="1"} 15 + # Different timestamps. eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100 {job="1"} 15 @@ -58,6 +88,9 @@ eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metri eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "") {job="1"} 165 +eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100) + label_replace(sum_over_time(metric{job="2"}[100] @ 100), "job", "1", "", "") + {job="1"} 165 + # Subqueries. # 10*(1+2+...+9) + 10. @@ -72,6 +105,10 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] @ 100 offset 20s) eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100) {job="1"} 288 +# 10*(1+2+...+7) + 8. +eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100) + {job="1"} 288 + # Subquery with different timestamps. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. 
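The tests above depend on a bare number being read as a count of seconds wherever a duration was previously required, so `offset 50` behaves like `offset 50s` and `[100]` like `[100s]`. The series converts such numbers in two ways: parseNumberLiteral multiplies by float64(time.Second), while the grammar actions introduced later in this series compute time.Duration(numLit.Val*1000) * time.Millisecond. A small standard-library-only sketch (function names are illustrative) showing that the two forms agree down to millisecond resolution:

	package main

	import (
		"fmt"
		"time"
	)

	// viaSeconds mirrors parseNumberLiteral; viaMilliseconds mirrors the
	// conversion in the grammar actions. Both read a bare number as seconds;
	// they can only diverge for values below millisecond resolution, where
	// viaMilliseconds truncates and viaSeconds keeps nanosecond precision.
	func viaSeconds(v float64) time.Duration      { return time.Duration(v * float64(time.Second)) }
	func viaMilliseconds(v float64) time.Duration { return time.Duration(v*1000) * time.Millisecond }

	func main() {
		for _, v := range []float64{50, 100, 0.5} {
			// e.g. 50 -> 50s and 50s; 0.5 -> 500ms and 500ms.
			fmt.Printf("%g -> %v and %v\n", v, viaSeconds(v), viaMilliseconds(v))
		}
	}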
diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index 718e001c3..ca02b337f 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -10,6 +10,11 @@ eval instant at 50m resets(http_requests[5m]) {path="/bar"} 0 {path="/biz"} 0 +eval instant at 50m resets(http_requests[300]) + {path="/foo"} 0 + {path="/bar"} 0 + {path="/biz"} 0 + eval instant at 50m resets(http_requests[20m]) {path="/foo"} 1 {path="/bar"} 0 diff --git a/promql/promqltest/testdata/staleness.test b/promql/promqltest/testdata/staleness.test index 76ee2f287..4fdbc997b 100644 --- a/promql/promqltest/testdata/staleness.test +++ b/promql/promqltest/testdata/staleness.test @@ -32,6 +32,9 @@ eval instant at 20s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[10s]) {} 1 +eval instant at 20s count_over_time(metric[10]) + {} 1 + clear diff --git a/promql/promqltest/testdata/subquery.test b/promql/promqltest/testdata/subquery.test index db85b1622..1d338d976 100644 --- a/promql/promqltest/testdata/subquery.test +++ b/promql/promqltest/testdata/subquery.test @@ -76,6 +76,21 @@ eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s) eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) {} 297 +eval instant at 1010s sum_over_time(metric1[30:10] offset 3) + {} 297 + +eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) + {} 297 + +eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s) + {} 297 + +eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s) + {} 297 + +eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) + {} 297 + # Nested subqueries eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) {} 0.4 From cfad8ff3b243caf5388ac53f5edefdab203a9ad2 Mon Sep 17 00:00:00 2001 From: darshanime Date: Mon, 4 Mar 2024 19:27:28 +0530 Subject: [PATCH 13/81] Deprecate duration token Signed-off-by: darshanime --- promql/parser/generated_parser.y | 95 +-- promql/parser/generated_parser.y.go | 818 +++++++++++----------- promql/parser/parse.go | 4 - promql/parser/parse_test.go | 92 ++- promql/promqltest/testdata/functions.test | 6 + 5 files changed, 557 insertions(+), 458 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 0de6b13e7..940526845 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -43,7 +43,6 @@ import ( int int64 uint uint64 float float64 - duration time.Duration } @@ -176,8 +175,7 @@ START_METRIC_SELECTOR %type int %type uint %type number series_value signed_number signed_or_unsigned_number -%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector -%type duration maybe_duration +%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector %start start @@ -218,7 +216,7 @@ expr : | binary_expr | function_call | matrix_selector - | number_literal + | number_duration_literal | offset_expr | paren_expr | string_literal @@ -415,18 +413,22 @@ paren_expr : 
LEFT_PAREN expr RIGHT_PAREN * Offset modifiers. */ -offset_expr: expr OFFSET duration +offset_expr: expr OFFSET number_duration_literal { - yylex.(*parser).addOffset($1, $3) - $$ = $1 + numLit, _ := $3.(*NumberLiteral) + dur := time.Duration(numLit.Val * 1000) * time.Millisecond + yylex.(*parser).addOffset($1, dur) + $$ = $1 } - | expr OFFSET SUB duration + | expr OFFSET SUB number_duration_literal { - yylex.(*parser).addOffset($1, -$4) - $$ = $1 + numLit, _ := $4.(*NumberLiteral) + dur := time.Duration(numLit.Val * 1000) * time.Millisecond + yylex.(*parser).addOffset($1, -dur) + $$ = $1 } | expr OFFSET error - { yylex.(*parser).unexpected("offset", "duration"); $$ = $1 } + { yylex.(*parser).unexpected("offset", "integer or duration"); $$ = $1 } ; /* * @ modifiers. @@ -452,7 +454,7 @@ at_modifier_preprocessors: START | END; * Subquery and range selectors. */ -matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET +matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET { var errMsg string vs, ok := $1.(*VectorSelector) @@ -469,32 +471,44 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET yylex.(*parser).addParseErrf(errRange, errMsg) } + numLit, _ := $3.(*NumberLiteral) $$ = &MatrixSelector{ VectorSelector: $1.(Expr), - Range: $3, + Range: time.Duration(numLit.Val * 1000) * time.Millisecond, EndPos: yylex.(*parser).lastClosing, } } ; -subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET +subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET { + numLitRange, _ := $3.(*NumberLiteral) + numLitStep, _ := $5.(*NumberLiteral) $$ = &SubqueryExpr{ Expr: $1.(Expr), - Range: $3, - Step: $5, - + Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond, + Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond, EndPos: $6.Pos + 1, } } - | expr LEFT_BRACKET duration COLON duration error + | expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET + { + numLitRange, _ := $3.(*NumberLiteral) + $$ = &SubqueryExpr{ + Expr: $1.(Expr), + Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond, + Step: 0, + EndPos: $5.Pos + 1, + } + } + | expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 } - | expr LEFT_BRACKET duration COLON error - { yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 } - | expr LEFT_BRACKET duration error + | expr LEFT_BRACKET number_duration_literal COLON error + { yylex.(*parser).unexpected("subquery selector", "number/duration or \"]\""); $$ = $1 } + | expr LEFT_BRACKET number_duration_literal error { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 } | expr LEFT_BRACKET error - { yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 } + { yylex.(*parser).unexpected("subquery selector", "number/duration"); $$ = $1 } ; /* @@ -866,12 +880,25 @@ match_op : EQL | NEQ | EQL_REGEX | NEQ_REGEX ; * Literals. 
*/ -number_literal : NUMBER +number_duration_literal : NUMBER { - $$ = &NumberLiteral{ + $$ = &NumberLiteral{ Val: yylex.(*parser).number($1.Val), PosRange: $1.PositionRange(), + } } + | DURATION + { + var err error + var dur time.Duration + dur, err = parseDuration($1.Val) + if err != nil { + yylex.(*parser).addParseErr($1.PositionRange(), err) + } + $$ = &NumberLiteral{ + Val: dur.Seconds(), + PosRange: $1.PositionRange(), + } } ; @@ -897,21 +924,6 @@ int : SUB uint { $$ = -int64($2) } | uint { $$ = int64($1) } ; -duration : DURATION - { - var err error - $$, err = parseDuration($1.Val) - if err != nil { - yylex.(*parser).addParseErr($1.PositionRange(), err) - } - } - | number - { - $$ = yylex.(*parser).parseNumberLiteral($1); - } - ; - - string_literal : STRING { $$ = &StringLiteral{ @@ -935,11 +947,6 @@ string_identifier : STRING * Wrappers for optional arguments. */ -maybe_duration : /* empty */ - {$$ = 0} - | duration - ; - maybe_grouping_labels: /* empty */ { $$ = nil } | grouping_labels ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 6d610bcde..8b8d9f400 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -31,7 +31,6 @@ type yySymType struct { int int64 uint uint64 float float64 - duration time.Duration } const EQL = 57346 @@ -236,16 +235,10 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 37, - 1, 136, - 10, 136, - 24, 136, + 1, 137, + 10, 137, + 24, 137, -2, 0, - -1, 60, - 2, 174, - 15, 174, - 78, 174, - 84, 174, - -2, 100, -1, 61, 2, 175, 15, 175, @@ -257,7 +250,7 @@ var yyExca = [...]int16{ 15, 176, 78, 176, 84, 176, - -2, 103, + -2, 102, -1, 63, 2, 177, 15, 177, @@ -275,19 +268,19 @@ var yyExca = [...]int16{ 15, 179, 78, 179, 84, 179, - -2, 110, + -2, 106, -1, 66, 2, 180, 15, 180, 78, 180, 84, 180, - -2, 112, + -2, 111, -1, 67, 2, 181, 15, 181, 78, 181, 84, 181, - -2, 114, + -2, 113, -1, 68, 2, 182, 15, 182, @@ -317,220 +310,215 @@ var yyExca = [...]int16{ 15, 186, 78, 186, 84, 186, - -2, 122, + -2, 119, -1, 73, 2, 187, 15, 187, 78, 187, 84, 187, -2, 123, + -1, 74, + 2, 188, + 15, 188, + 78, 188, + 84, 188, + -2, 124, + -1, 199, + 9, 236, + 12, 236, + 13, 236, + 18, 236, + 19, 236, + 25, 236, + 40, 236, + 46, 236, + 47, 236, + 50, 236, + 56, 236, + 61, 236, + 62, 236, + 63, 236, + 64, 236, + 65, 236, + 66, 236, + 67, 236, + 68, 236, + 69, 236, + 70, 236, + 71, 236, + 72, 236, + 73, 236, + 74, 236, + 78, 236, + 82, 236, + 84, 236, + 87, 236, + 88, 236, + -2, 0, -1, 200, - 12, 238, - 13, 238, - 18, 238, - 19, 238, - 25, 238, - 40, 238, - 46, 238, - 47, 238, - 50, 238, - 56, 238, - 61, 238, - 62, 238, - 63, 238, - 64, 238, - 65, 238, - 66, 238, - 67, 238, - 68, 238, - 69, 238, - 70, 238, - 71, 238, - 72, 238, - 73, 238, - 74, 238, - 78, 238, - 82, 238, - 84, 238, - 87, 238, - 88, 238, - -2, 0, - -1, 201, - 12, 238, - 13, 238, - 18, 238, - 19, 238, - 25, 238, - 40, 238, - 46, 238, - 47, 238, - 50, 238, - 56, 238, - 61, 238, - 62, 238, - 63, 238, - 64, 238, - 65, 238, - 66, 238, - 67, 238, - 68, 238, - 69, 238, - 70, 238, - 71, 238, - 72, 238, - 73, 238, - 74, 238, - 78, 238, - 82, 238, - 84, 238, - 87, 238, - 88, 238, - -2, 0, - -1, 222, - 21, 236, - -2, 0, - -1, 293, - 21, 237, + 9, 236, + 12, 236, + 13, 236, + 18, 236, + 19, 236, + 25, 236, + 40, 236, + 46, 236, + 47, 236, + 50, 236, + 56, 236, + 61, 236, + 62, 236, + 63, 236, + 64, 236, + 65, 236, + 66, 236, + 67, 236, + 68, 236, + 69, 236, + 70, 236, + 71, 236, + 72, 236, + 73, 236, + 74, 236, + 78, 236, + 82, 236, + 84, 236, + 87, 236, + 88, 
236, -2, 0, } const yyPrivate = 57344 -const yyLast = 794 +const yyLast = 727 var yyAct = [...]int16{ - 152, 331, 329, 275, 336, 154, 226, 39, 192, 148, - 289, 288, 158, 117, 81, 178, 228, 106, 105, 6, - 156, 108, 198, 132, 199, 153, 238, 109, 200, 201, - 300, 107, 244, 120, 326, 325, 322, 153, 190, 59, - 320, 302, 266, 118, 110, 193, 161, 349, 297, 196, - 127, 260, 153, 296, 291, 259, 121, 153, 161, 350, - 240, 241, 162, 112, 242, 113, 348, 330, 295, 347, - 258, 111, 255, 161, 162, 229, 231, 233, 234, 235, - 243, 245, 248, 249, 250, 251, 252, 256, 257, 162, - 153, 230, 232, 236, 237, 239, 246, 247, 114, 300, - 308, 253, 254, 309, 157, 159, 160, 108, 2, 3, - 4, 5, 223, 109, 163, 307, 222, 150, 173, 167, - 170, 196, 306, 165, 151, 166, 104, 151, 194, 218, - 337, 221, 110, 76, 153, 184, 197, 153, 186, 104, - 195, 202, 203, 204, 205, 206, 207, 208, 209, 210, - 211, 212, 213, 214, 215, 216, 269, 128, 228, 217, - 35, 267, 219, 220, 265, 86, 88, 100, 238, 103, - 270, 149, 263, 188, 244, 161, 97, 98, 225, 83, - 100, 101, 103, 87, 7, 319, 271, 264, 169, 82, - 261, 162, 321, 177, 102, 262, 187, 185, 176, 318, - 317, 168, 240, 241, 268, 10, 242, 102, 316, 286, - 287, 175, 315, 290, 255, 78, 314, 229, 231, 233, - 234, 235, 243, 245, 248, 249, 250, 251, 252, 256, - 257, 313, 293, 230, 232, 236, 237, 239, 246, 247, - 179, 312, 180, 253, 254, 133, 134, 135, 136, 137, + 155, 330, 328, 274, 335, 152, 225, 39, 191, 44, + 288, 287, 156, 118, 82, 177, 55, 106, 6, 53, + 77, 109, 56, 133, 108, 22, 54, 110, 107, 171, + 159, 197, 57, 198, 199, 200, 60, 111, 325, 151, + 324, 301, 320, 307, 265, 154, 55, 75, 128, 105, + 159, 160, 349, 18, 19, 290, 54, 20, 306, 308, + 105, 319, 159, 76, 113, 305, 114, 161, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 112, 160, 179, 13, 87, 89, 264, 24, + 101, 30, 104, 150, 31, 32, 115, 98, 99, 161, + 109, 101, 102, 104, 88, 195, 110, 2, 3, 4, + 5, 263, 258, 149, 84, 162, 55, 103, 159, 172, + 166, 169, 299, 181, 83, 165, 54, 257, 103, 193, + 157, 158, 184, 180, 182, 164, 183, 196, 299, 185, + 217, 194, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 129, 348, 329, + 216, 218, 219, 55, 38, 121, 53, 77, 268, 56, + 111, 336, 22, 54, 121, 119, 266, 347, 77, 57, + 346, 176, 269, 259, 119, 7, 175, 35, 122, 260, + 160, 262, 117, 178, 75, 179, 318, 122, 270, 174, + 18, 19, 317, 267, 20, 316, 161, 187, 285, 286, + 76, 86, 289, 315, 261, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 10, + 186, 291, 13, 314, 181, 313, 24, 312, 30, 79, + 311, 31, 32, 310, 180, 182, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 120, 311, 310, 126, 85, 125, 83, 298, 299, 301, - 118, 303, 124, 36, 1, 292, 82, 123, 116, 304, - 305, 182, 274, 121, 80, 49, 48, 47, 46, 273, - 122, 181, 183, 277, 278, 276, 283, 285, 282, 284, - 279, 280, 281, 323, 324, 180, 131, 8, 104, 294, - 328, 37, 157, 333, 334, 335, 151, 332, 50, 45, - 339, 338, 341, 340, 77, 44, 153, 342, 343, 43, - 53, 76, 344, 55, 129, 88, 22, 54, 346, 171, - 172, 42, 130, 56, 182, 97, 98, 41, 351, 100, - 164, 103, 87, 40, 181, 183, 58, 151, 74, 9, - 9, 51, 191, 345, 18, 19, 272, 153, 20, 84, - 189, 224, 79, 119, 75, 155, 102, 57, 227, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 52, 115, 0, 13, 0, 0, 0, - 24, 0, 30, 0, 0, 31, 32, 38, 0, 53, - 76, 0, 55, 327, 0, 22, 54, 0, 0, 0, - 0, 0, 56, 0, 277, 278, 276, 283, 285, 282, - 284, 279, 280, 281, 0, 0, 0, 74, 0, 0, - 0, 0, 0, 18, 19, 0, 0, 20, 0, 0, - 0, 0, 0, 75, 0, 0, 0, 0, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 
70, 71, - 72, 73, 0, 0, 0, 13, 0, 0, 0, 24, - 0, 30, 0, 0, 31, 32, 53, 76, 0, 55, - 0, 0, 22, 54, 0, 0, 0, 0, 0, 56, + 148, 309, 36, 127, 293, 126, 297, 298, 300, 296, + 302, 55, 222, 189, 295, 84, 221, 125, 303, 304, + 192, 54, 124, 292, 195, 83, 168, 159, 1, 294, + 227, 220, 50, 81, 163, 123, 49, 48, 78, 167, + 237, 55, 322, 323, 47, 46, 243, 132, 160, 327, + 321, 54, 332, 333, 334, 45, 331, 43, 130, 338, + 337, 340, 339, 170, 161, 59, 341, 342, 9, 9, + 42, 343, 8, 131, 239, 240, 37, 345, 241, 41, + 40, 51, 190, 344, 271, 85, 254, 350, 188, 228, + 230, 232, 233, 234, 242, 244, 247, 248, 249, 250, + 251, 255, 256, 223, 80, 229, 231, 235, 236, 238, + 245, 246, 120, 153, 55, 252, 253, 53, 77, 58, + 56, 226, 52, 22, 54, 116, 0, 0, 0, 0, + 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 227, 0, 0, 0, 0, 75, 0, 0, 0, 0, + 237, 18, 19, 0, 0, 20, 243, 0, 0, 0, + 224, 76, 0, 0, 0, 0, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 0, 0, 0, 13, 239, 240, 0, 24, 241, 30, + 0, 0, 31, 32, 0, 0, 254, 105, 0, 228, + 230, 232, 233, 234, 242, 244, 247, 248, 249, 250, + 251, 255, 256, 0, 0, 229, 231, 235, 236, 238, + 245, 246, 17, 77, 89, 252, 253, 0, 22, 0, + 0, 326, 0, 0, 98, 99, 0, 0, 101, 0, + 104, 88, 276, 277, 275, 282, 284, 281, 283, 278, + 279, 280, 17, 35, 0, 0, 18, 19, 22, 0, + 20, 0, 0, 0, 0, 103, 0, 0, 0, 0, + 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, + 27, 28, 29, 33, 34, 0, 18, 19, 13, 0, + 20, 0, 24, 0, 30, 0, 0, 31, 32, 0, + 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, + 27, 28, 29, 33, 34, 105, 0, 0, 13, 0, + 0, 0, 24, 173, 30, 0, 0, 31, 32, 0, + 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, + 0, 87, 89, 90, 0, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 0, 101, 102, 104, 88, + 87, 89, 90, 0, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 273, 101, 102, 104, 88, 105, + 0, 272, 0, 103, 0, 276, 277, 275, 282, 284, + 281, 283, 278, 279, 280, 0, 0, 0, 105, 0, + 0, 0, 103, 0, 0, 87, 89, 90, 0, 91, + 92, 93, 0, 95, 96, 97, 98, 99, 100, 0, + 101, 102, 104, 88, 87, 89, 90, 0, 91, 92, + 0, 0, 95, 96, 0, 98, 99, 100, 0, 101, + 102, 104, 88, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 74, 0, 17, 76, 0, 0, - 18, 19, 22, 0, 20, 0, 0, 0, 0, 0, - 75, 0, 0, 0, 0, 60, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 0, - 18, 19, 13, 0, 20, 0, 24, 0, 30, 0, - 0, 31, 32, 0, 0, 11, 12, 14, 15, 16, - 21, 23, 25, 26, 27, 28, 29, 33, 34, 17, - 35, 0, 13, 0, 0, 22, 24, 0, 30, 0, - 0, 31, 32, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 18, 19, 0, 0, 20, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 11, 12, - 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, - 33, 34, 104, 0, 0, 13, 0, 0, 0, 24, - 174, 30, 0, 0, 31, 32, 0, 0, 0, 0, - 0, 104, 0, 0, 0, 0, 0, 0, 86, 88, - 89, 0, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 0, 100, 101, 103, 87, 86, 88, 89, - 0, 90, 91, 92, 93, 94, 95, 96, 97, 98, - 99, 0, 100, 101, 103, 87, 104, 0, 0, 0, - 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 104, 0, 0, 0, 102, - 0, 0, 86, 88, 89, 0, 90, 91, 92, 0, - 94, 95, 96, 97, 98, 99, 0, 100, 101, 103, - 87, 86, 88, 89, 0, 90, 91, 0, 0, 94, - 95, 0, 97, 98, 99, 0, 100, 101, 103, 87, - 0, 0, 0, 0, 102, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 102, + 0, 0, 0, 0, 0, 0, 103, } var yyPact = [...]int16{ - 17, 174, 567, 567, 397, 504, -1000, -1000, -1000, 147, + 16, 175, 500, 500, 154, 470, -1000, -1000, -1000, 174, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 264, -1000, 262, -1000, 647, + -1000, 
-1000, -1000, -1000, -1000, 273, -1000, 209, -1000, 580, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 29, 117, -1000, 474, -1000, 474, 120, -1000, + -1000, -1000, 22, 155, -1000, -1000, 365, -1000, 365, 165, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 258, -1000, -1000, 270, - -1000, -1000, 261, -1000, 26, -1000, -54, -54, -54, -54, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 172, -1000, -1000, + 275, -1000, -1000, 261, -1000, 24, -1000, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, - -54, -54, 115, 18, 348, 117, -57, -1000, 186, 186, - 318, -1000, 628, 112, -1000, 191, -1000, -1000, 238, 301, - -1000, -1000, -1000, 177, -1000, 171, -1000, 33, 474, -1000, - -59, -51, -1000, 474, 474, 474, 474, 474, 474, 474, - 474, 474, 474, 474, 474, 474, 474, 474, -1000, 118, - -1000, -1000, -1000, -1000, -1000, 114, -1000, -1000, -1000, -1000, - -1000, 71, 71, 110, -1000, -1000, -1000, -1000, 156, -1000, - -1000, 48, -1000, 647, -1000, -1000, 31, -1000, 170, -1000, - -1000, -1000, -1000, -1000, 162, -1000, -1000, -1000, -1000, -1000, - 16, 135, 130, -1000, -1000, -1000, 265, 294, 186, 186, - 186, 186, 112, 112, 125, 125, 125, 711, 692, 125, - 125, 711, 112, 112, 125, 112, 294, -1000, 32, -1000, - -1000, -1000, 307, -1000, 46, -1000, -1000, -1000, -1000, -1000, + -54, -54, -54, 37, 43, 292, 155, -57, -1000, 284, + 284, 7, -1000, 561, 35, -1000, 179, -1000, -1000, 191, + 80, -1000, -1000, -1000, 112, -1000, 205, -1000, 268, 365, + -1000, -50, -45, -1000, 365, 365, 365, 365, 365, 365, + 365, 365, 365, 365, 365, 365, 365, 365, 365, -1000, + 107, -1000, -1000, 125, -1000, -1000, -1000, -1000, -1000, -1000, + 99, 99, 270, -1000, -1000, -1000, -1000, 398, -1000, -1000, + 105, -1000, 580, -1000, -1000, 163, -1000, 189, -1000, -1000, + -1000, -1000, -1000, 86, -1000, -1000, -1000, -1000, -1000, 18, + 150, 142, -1000, -1000, -1000, 617, 443, 284, 284, 284, + 284, 35, 35, 46, 46, 46, 644, 625, 46, 46, + 644, 35, 35, 46, 35, 443, -1000, 33, -1000, -1000, + -1000, 262, -1000, 267, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 474, - -1000, -1000, -1000, -1000, -1000, -1000, 80, 80, 15, 80, - 105, 105, 98, 86, -1000, -1000, 256, 255, 235, 225, - 210, 206, 202, 194, 193, 179, -1000, -1000, -1000, -1000, - -1000, -1000, 19, 190, -1000, -1000, 14, -1000, 647, -1000, - -1000, -1000, 80, -1000, 9, 8, 396, -1000, -1000, -1000, - 11, 6, 71, 71, 71, 116, 116, 11, 116, 11, - -1000, -1000, -1000, -1000, -1000, 80, 80, -1000, -1000, -1000, - 80, -1000, -1000, -1000, -1000, -1000, -1000, 71, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 45, -1000, 38, -1000, -1000, - -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 365, -1000, + -1000, -1000, -1000, -1000, -1000, 119, 119, 15, 119, 89, + 89, 41, 42, -1000, -1000, 255, 237, 234, 231, 229, + 227, 207, 199, 196, 190, -1000, -1000, -1000, -1000, -1000, + -1000, 40, -1000, -1000, -1000, 288, -1000, 580, -1000, -1000, + -1000, 119, -1000, 14, 12, 474, -1000, -1000, -1000, 103, + 11, 99, 99, 99, 157, 157, 103, 157, 103, -1000, + -1000, -1000, -1000, -1000, 119, 119, -1000, -1000, -1000, 119, + -1000, -1000, -1000, -1000, -1000, -1000, 99, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 156, -1000, 31, -1000, -1000, -1000, + 
-1000, } var yyPgo = [...]int16{ - 0, 394, 13, 393, 6, 15, 378, 356, 377, 375, - 373, 205, 307, 372, 14, 371, 10, 11, 370, 369, - 8, 366, 3, 4, 363, 2, 1, 0, 362, 12, - 5, 361, 353, 18, 157, 347, 342, 7, 341, 339, - 17, 334, 39, 329, 325, 319, 306, 288, 287, 286, - 285, 318, 9, 275, 274, 273, + 0, 385, 13, 382, 6, 15, 381, 325, 379, 373, + 372, 229, 332, 364, 14, 363, 10, 11, 348, 345, + 8, 344, 3, 4, 343, 2, 1, 0, 342, 12, + 5, 341, 340, 17, 157, 339, 333, 7, 330, 323, + 28, 318, 36, 317, 9, 315, 307, 305, 304, 297, + 296, 292, 288, 262, } var yyR1 = [...]int8{ - 0, 54, 54, 54, 54, 54, 54, 54, 37, 37, + 0, 52, 52, 52, 52, 52, 52, 52, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 32, 32, 32, 32, 33, 33, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, @@ -538,22 +526,22 @@ var yyR1 = [...]int8{ 41, 16, 16, 16, 16, 15, 15, 15, 4, 4, 38, 40, 40, 39, 39, 39, 47, 45, 45, 45, 31, 31, 31, 9, 9, 43, 49, 49, 49, 49, - 49, 50, 51, 51, 51, 42, 42, 42, 1, 1, - 1, 2, 2, 2, 2, 2, 2, 2, 12, 12, + 49, 49, 50, 51, 51, 51, 42, 42, 42, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 12, + 12, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 11, 11, 11, 11, 13, 13, - 13, 14, 14, 14, 14, 55, 19, 19, 19, 19, - 18, 18, 18, 18, 18, 18, 18, 18, 18, 28, - 28, 28, 20, 20, 20, 20, 21, 21, 21, 22, - 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, - 23, 24, 24, 24, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, + 7, 7, 7, 7, 7, 11, 11, 11, 11, 13, + 13, 13, 14, 14, 14, 14, 53, 19, 19, 19, + 19, 18, 18, 18, 18, 18, 18, 18, 18, 18, + 28, 28, 28, 20, 20, 20, 20, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 24, 24, 24, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 8, 8, 5, - 5, 5, 5, 44, 27, 29, 29, 30, 30, 26, - 25, 25, 52, 52, 48, 10, 53, 53, 17, 17, + 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, + 5, 5, 5, 5, 44, 44, 27, 29, 29, 30, + 30, 26, 25, 25, 48, 10, 17, 17, } var yyR2 = [...]int8{ @@ -564,101 +552,101 @@ var yyR2 = [...]int8{ 4, 4, 1, 0, 1, 3, 3, 1, 1, 3, 3, 3, 4, 2, 1, 3, 1, 2, 1, 1, 2, 3, 2, 3, 1, 2, 3, 3, 4, 3, - 3, 5, 3, 1, 1, 4, 6, 6, 5, 4, - 3, 2, 2, 1, 1, 3, 4, 2, 3, 1, - 2, 3, 3, 1, 3, 3, 2, 1, 2, 1, + 3, 5, 3, 1, 1, 4, 6, 5, 6, 5, + 4, 3, 2, 2, 1, 1, 3, 4, 2, 3, + 1, 2, 3, 3, 1, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 3, 4, 2, 0, 3, 1, - 2, 3, 3, 2, 1, 2, 0, 3, 2, 1, - 1, 3, 1, 3, 4, 1, 3, 5, 5, 1, - 1, 1, 4, 3, 3, 2, 3, 1, 2, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, - 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 3, 4, 2, 0, 3, + 1, 2, 3, 3, 2, 1, 2, 0, 3, 2, + 1, 1, 3, 1, 3, 4, 1, 3, 5, 5, + 1, 1, 1, 4, 3, 3, 2, 3, 1, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, - 2, 1, 1, 1, 1, 1, 0, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, + 1, 1, 2, 1, 1, 1, 0, 1, } var yyChk = [...]int16{ - -1000, -54, 91, 92, 93, 94, 2, 10, -12, -7, + -1000, -52, 91, 92, 93, 94, 2, 10, -12, -7, -11, 61, 62, 78, 63, 64, 65, 12, 46, 47, 50, 66, 18, 67, 82, 68, 69, 70, 71, 72, - 84, 87, 88, 73, 74, 13, -55, -12, 10, -37, + 84, 87, 88, 73, 74, 13, -53, -12, 10, -37, -32, -35, -38, -43, -44, -45, -47, -48, -49, -50, - -51, -31, -3, 12, 19, 15, 25, -8, -7, -42, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 40, 56, 13, -51, -11, -13, - 20, -14, 12, 2, -19, 2, 40, 58, 41, 42, - 
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 55, 56, 82, 57, 14, -33, -40, 2, 78, 84, - 15, -40, -37, -37, -42, -1, 20, -2, 12, -10, - 2, 25, 20, 7, 2, 4, 2, 24, -34, -41, - -36, -46, 77, -34, -34, -34, -34, -34, -34, -34, - -34, -34, -34, -34, -34, -34, -34, -34, -52, 56, - 2, 9, -27, 19, -30, -9, 2, -27, -29, 87, - 88, 40, 56, -52, 2, -40, -33, -16, 15, 2, - -16, -39, 22, -37, 22, 20, 7, 2, -5, 2, - 4, 53, 43, 54, -5, 20, -14, 25, 2, -18, - 5, -28, -20, 12, -27, -29, 16, -37, 81, 83, - 79, 80, -37, -37, -37, -37, -37, -37, -37, -37, - -37, -37, -37, -37, -37, -37, -37, -52, 15, -27, - -27, 21, 6, 2, -15, 22, -4, -6, 2, 61, - 77, 62, 78, 63, 64, 65, 79, 80, 12, 81, - 46, 47, 50, 66, 18, 67, 82, 83, 68, 69, - 70, 71, 72, 87, 88, 58, 73, 74, 22, 7, - 20, -2, 25, 2, 25, 2, 26, 26, -29, 26, - 40, 56, -21, 24, 17, -22, 30, 28, 29, 35, - 36, 37, 33, 31, 34, 32, -16, -16, -17, -16, - -17, 22, -53, -52, 2, 22, 7, 2, -37, -26, - 19, -26, 26, -26, -20, -20, 24, 17, 2, 17, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 21, 2, 22, -4, -26, 26, 26, 17, -22, -25, - 56, -26, -30, -27, -27, -27, -23, 14, -23, -25, - -23, -25, -26, -26, -26, -24, -27, 24, 21, 2, - 21, -27, + -51, -31, -3, 12, 19, 9, 15, 25, -8, -7, + -42, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 40, 56, 13, -51, -11, + -13, 20, -14, 12, 2, -19, 2, 40, 58, 41, + 42, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 55, 56, 82, 57, 14, -33, -40, 2, 78, + 84, 15, -40, -37, -37, -42, -1, 20, -2, 12, + -10, 2, 25, 20, 7, 2, 4, 2, 24, -34, + -41, -36, -46, 77, -34, -34, -34, -34, -34, -34, + -34, -34, -34, -34, -34, -34, -34, -34, -34, -44, + 56, 2, -30, -9, 2, -27, -29, 87, 88, 19, + 40, 56, -44, 2, -40, -33, -16, 15, 2, -16, + -39, 22, -37, 22, 20, 7, 2, -5, 2, 4, + 53, 43, 54, -5, 20, -14, 25, 2, -18, 5, + -28, -20, 12, -27, -29, 16, -37, 81, 83, 79, + 80, -37, -37, -37, -37, -37, -37, -37, -37, -37, + -37, -37, -37, -37, -37, -37, -44, 15, -27, -27, + 21, 6, 2, -15, 22, -4, -6, 2, 61, 77, + 62, 78, 63, 64, 65, 79, 80, 12, 81, 46, + 47, 50, 66, 18, 67, 82, 83, 68, 69, 70, + 71, 72, 87, 88, 58, 73, 74, 22, 7, 20, + -2, 25, 2, 25, 2, 26, 26, -29, 26, 40, + 56, -21, 24, 17, -22, 30, 28, 29, 35, 36, + 37, 33, 31, 34, 32, -16, -16, -17, -16, -17, + 22, -44, 21, 2, 22, 7, 2, -37, -26, 19, + -26, 26, -26, -20, -20, 24, 17, 2, 17, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 21, + 2, 22, -4, -26, 26, 26, 17, -22, -25, 56, + -26, -30, -27, -27, -27, -23, 14, -23, -25, -23, + -25, -26, -26, -26, -24, -27, 24, 21, 2, 21, + -27, } var yyDef = [...]int16{ - 0, -2, 127, 127, 0, 0, 7, 6, 1, 127, - 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, - 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, - 119, 120, 121, 122, 123, 0, 2, -2, 3, 4, + 0, -2, 128, 128, 0, 0, 7, 6, 1, 128, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 106, 223, 0, 234, 0, 83, 84, - -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, 217, 218, 0, 5, 98, 0, - 126, 129, 0, 134, 135, 139, 43, 43, 43, 43, + 18, 19, 0, 107, 224, 225, 0, 234, 0, 84, + 85, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, 218, 219, 0, 5, 99, + 0, 127, 130, 0, 135, 136, 140, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, - 43, 43, 0, 0, 0, 0, 22, 23, 0, 0, - 0, 60, 0, 81, 82, 0, 87, 89, 0, 93, - 97, 235, 124, 0, 130, 0, 133, 138, 0, 42, - 47, 48, 44, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 67, 0, - 69, 232, 233, 224, 70, 0, 72, 227, 228, 73, - 74, 0, 0, 0, 
80, 20, 21, 24, 0, 54, - 25, 0, 62, 64, 66, 85, 0, 90, 0, 96, - 219, 220, 221, 222, 0, 125, 128, 131, 132, 137, - 140, 142, 145, 149, 150, 151, 0, 26, 0, 0, - -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 68, 0, 225, - 226, 75, -2, 79, 0, 53, 56, 58, 59, 188, - 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, - 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, - 209, 210, 211, 212, 213, 214, 215, 216, 61, 65, - 86, 88, 91, 95, 92, 94, 0, 0, 0, 0, - 0, 0, 0, 0, 155, 157, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 45, 46, 49, 239, - 50, 71, 0, -2, 78, 51, 0, 57, 63, 141, - 229, 143, 0, 146, 0, 0, 0, 153, 158, 154, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 76, 77, 52, 55, 144, 0, 0, 152, 156, 159, - 0, 231, 160, 161, 162, 163, 164, 0, 165, 166, - 167, 168, 147, 148, 230, 0, 172, 0, 170, 173, - 169, 171, + 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, + 0, 0, 60, 0, 82, 83, 0, 88, 90, 0, + 94, 98, 235, 125, 0, 131, 0, 134, 139, 0, + 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, + 0, 69, 70, 0, 72, 229, 230, 73, 74, 226, + 0, 0, 0, 81, 20, 21, 24, 0, 54, 25, + 0, 62, 64, 66, 86, 0, 91, 0, 97, 220, + 221, 222, 223, 0, 126, 129, 132, 133, 138, 141, + 143, 146, 150, 151, 152, 0, 26, 0, 0, -2, + -2, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 68, 0, 227, 228, + 75, 0, 80, 0, 53, 56, 58, 59, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 61, 65, 87, + 89, 92, 96, 93, 95, 0, 0, 0, 0, 0, + 0, 0, 0, 156, 158, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 45, 46, 49, 237, 50, + 71, 0, 77, 79, 51, 0, 57, 63, 142, 231, + 144, 0, 147, 0, 0, 0, 154, 159, 155, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, + 78, 52, 55, 145, 0, 0, 153, 157, 160, 0, + 233, 161, 162, 163, 164, 165, 0, 166, 167, 168, + 169, 148, 149, 232, 0, 173, 0, 171, 174, 170, + 172, } var yyTok1 = [...]int8{ @@ -1304,19 +1292,23 @@ yydefault: case 67: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration) + numLit, _ := yyDollar[3].node.(*NumberLiteral) + dur := time.Duration(numLit.Val*1000) * time.Millisecond + yylex.(*parser).addOffset(yyDollar[1].node, dur) yyVAL.node = yyDollar[1].node } case 68: yyDollar = yyS[yypt-4 : yypt+1] { - yylex.(*parser).addOffset(yyDollar[1].node, -yyDollar[4].duration) + numLit, _ := yyDollar[4].node.(*NumberLiteral) + dur := time.Duration(numLit.Val*1000) * time.Millisecond + yylex.(*parser).addOffset(yyDollar[1].node, -dur) yyVAL.node = yyDollar[1].node } case 69: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("offset", "duration") + yylex.(*parser).unexpected("offset", "integer or duration") yyVAL.node = yyDollar[1].node } case 70: @@ -1355,48 +1347,61 @@ yydefault: yylex.(*parser).addParseErrf(errRange, errMsg) } + numLit, _ := yyDollar[3].node.(*NumberLiteral) yyVAL.node = &MatrixSelector{ VectorSelector: yyDollar[1].node.(Expr), - Range: yyDollar[3].duration, + Range: time.Duration(numLit.Val*1000) * time.Millisecond, EndPos: yylex.(*parser).lastClosing, } } case 76: yyDollar = yyS[yypt-6 : yypt+1] { + numLitRange, _ := yyDollar[3].node.(*NumberLiteral) + numLitStep, _ := yyDollar[5].node.(*NumberLiteral) yyVAL.node = &SubqueryExpr{ - Expr: yyDollar[1].node.(Expr), - Range: yyDollar[3].duration, - Step: yyDollar[5].duration, - + Expr: yyDollar[1].node.(Expr), + Range: time.Duration(numLitRange.Val*1000) * time.Millisecond, + Step: time.Duration(numLitStep.Val*1000) * time.Millisecond, EndPos: yyDollar[6].item.Pos + 1, } } case 
77: + yyDollar = yyS[yypt-5 : yypt+1] + { + numLitRange, _ := yyDollar[3].node.(*NumberLiteral) + yyVAL.node = &SubqueryExpr{ + Expr: yyDollar[1].node.(Expr), + Range: time.Duration(numLitRange.Val*1000) * time.Millisecond, + Step: 0, + EndPos: yyDollar[5].item.Pos + 1, + } + } + case 78: yyDollar = yyS[yypt-6 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 78: + case 79: yyDollar = yyS[yypt-5 : yypt+1] { - yylex.(*parser).unexpected("subquery selector", "duration or \"]\"") + yylex.(*parser).unexpected("subquery selector", "number/duration or \"]\"") yyVAL.node = yyDollar[1].node } - case 79: + case 80: yyDollar = yyS[yypt-4 : yypt+1] { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 80: + case 81: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("subquery selector", "duration") + yylex.(*parser).unexpected("subquery selector", "number/duration") yyVAL.node = yyDollar[1].node } - case 81: + case 82: yyDollar = yyS[yypt-2 : yypt+1] { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { @@ -1409,7 +1414,7 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 82: + case 83: yyDollar = yyS[yypt-2 : yypt+1] { vs := yyDollar[2].node.(*VectorSelector) @@ -1418,7 +1423,7 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 83: + case 84: yyDollar = yyS[yypt-1 : yypt+1] { vs := &VectorSelector{ @@ -1429,14 +1434,14 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 84: + case 85: yyDollar = yyS[yypt-1 : yypt+1] { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 85: + case 86: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1444,7 +1449,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 86: + case 87: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1452,7 +1457,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 87: + case 88: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1460,7 +1465,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 88: + case 89: yyDollar = yyS[yypt-3 : yypt+1] { if yyDollar[1].matchers != nil { @@ -1469,38 +1474,32 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 89: + case 90: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 90: + case 91: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 91: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) - } case 92: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 93: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) + } + case 94: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) } - case 94: - yyDollar = yyS[yypt-3 : yypt+1] - { - yylex.(*parser).unexpected("label matching", "string") - yyVAL.matcher = nil - } case 95: yyDollar = yyS[yypt-3 : yypt+1] { @@ 
-1508,89 +1507,95 @@ yydefault: yyVAL.matcher = nil } case 96: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).unexpected("label matching", "string") + yyVAL.matcher = nil + } + case 97: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 97: + case 98: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 98: + case 99: yyDollar = yyS[yypt-2 : yypt+1] { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) yyVAL.labels = b.Labels() } - case 99: + case 100: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.labels = yyDollar[1].labels } - case 124: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.labels = labels.New(yyDollar[2].lblList...) - } case 125: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 126: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.labels = labels.New(yyDollar[2].lblList...) + } + case 127: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 127: + case 128: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 128: + case 129: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 129: + case 130: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 130: + case 131: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 131: + case 132: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 132: + case 133: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 133: + case 134: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 134: + case 135: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 135: + case 136: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1598,33 +1603,33 @@ yydefault: values: yyDollar[2].series, } } - case 136: + case 137: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 137: + case 138: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 138: + case 139: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 139: + case 140: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 140: + case 141: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 141: + case 142: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1632,12 +1637,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 142: + case 143: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 143: + case 144: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1646,7 +1651,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 144: + case 145: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1656,12 +1661,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 145: + case 146: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 146: + case 147: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1671,7 +1676,7 @@ yydefault: //$1 += $2 } } - case 147: + case 148: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1680,7 +1685,7 @@ yydefault: } yyVAL.series = val } - case 148: + case 149: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1689,7 +1694,7 @@ yydefault: } yyVAL.series = val } - case 149: + case 150: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1697,124 +1702,124 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 152: - yyDollar = yyS[yypt-4 : yypt+1] - { - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) - } case 153: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 154: yyDollar = yyS[yypt-3 : yypt+1] { - m := yylex.(*parser).newMap() - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 155: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 156: + yyDollar = yyS[yypt-2 : yypt+1] + { + m := yylex.(*parser).newMap() + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) + } + case 157: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 157: + case 158: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 158: + case 159: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } - case 159: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["schema"] = yyDollar[3].int - } case 160: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["sum"] = yyDollar[3].float + yyVAL.descriptors["schema"] = yyDollar[3].int } case 161: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["count"] = yyDollar[3].float + yyVAL.descriptors["sum"] = yyDollar[3].float } case 162: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket"] = yyDollar[3].float + yyVAL.descriptors["count"] = yyDollar[3].float } case 163: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float + yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set + yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["offset"] = yyDollar[3].int } case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 169: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 170: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } case 171: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + yyVAL.bucket_set = yyDollar[2].bucket_set } case 172: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + } + case 173: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 223: + case 224: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1822,22 +1827,36 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 224: + case 225: + yyDollar = yyS[yypt-1 : yypt+1] + { + var err error + var dur time.Duration + dur, err = parseDuration(yyDollar[1].item.Val) + if err != nil { + yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) + } + yyVAL.node = &NumberLiteral{ + Val: dur.Seconds(), + PosRange: yyDollar[1].item.PositionRange(), + } + } + case 226: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 225: + case 227: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - case 226: + case 228: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 229: + case 231: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ 
-1846,29 +1865,15 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 230: + case 232: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 231: - yyDollar = yyS[yypt-1 : yypt+1] - { - yyVAL.int = int64(yyDollar[1].uint) - } - case 232: - yyDollar = yyS[yypt-1 : yypt+1] - { - var err error - yyVAL.duration, err = parseDuration(yyDollar[1].item.Val) - if err != nil { - yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) - } - } case 233: yyDollar = yyS[yypt-1 : yypt+1] { - yyVAL.duration = yylex.(*parser).parseNumberLiteral(yyDollar[1].float) + yyVAL.int = int64(yyDollar[1].uint) } case 234: yyDollar = yyS[yypt-1 : yypt+1] @@ -1888,11 +1893,6 @@ yydefault: } } case 236: - yyDollar = yyS[yypt-0 : yypt+1] - { - yyVAL.duration = 0 - } - case 238: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/parse.go b/promql/parser/parse.go index c614c7ad6..6f73e2427 100644 --- a/promql/parser/parse.go +++ b/promql/parser/parse.go @@ -849,10 +849,6 @@ func parseDuration(ds string) (time.Duration, error) { return time.Duration(dur), nil } -func (p *parser) parseNumberLiteral(ts float64) time.Duration { - return time.Duration(ts * float64(time.Second)) -} - // parseGenerated invokes the yacc generated parser. // The generated parser gets the provided startSymbol injected into // the lexer stream, based on which grammar will be used. diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 8f9a116b9..8ef5775a6 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2152,6 +2152,96 @@ var testExpr = []struct { EndPos: 27, }, }, + { + input: `foo[3ms] @ 2.345`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + Timestamp: makeInt64Pointer(2345), + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 3, + }, + }, + Range: 3 * time.Millisecond, + EndPos: 16, + }, + }, + { + input: `foo[4s180ms] @ 2.345`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + Timestamp: makeInt64Pointer(2345), + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 3, + }, + }, + Range: 4*time.Second + 180*time.Millisecond, + EndPos: 20, + }, + }, + { + input: `foo[4.18] @ 2.345`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + Timestamp: makeInt64Pointer(2345), + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 3, + }, + }, + Range: 4*time.Second + 180*time.Millisecond, + EndPos: 17, + }, + }, + { + input: `foo[4s18ms] @ 2.345`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + Timestamp: makeInt64Pointer(2345), + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 3, + }, + }, + Range: 4*time.Second + 18*time.Millisecond, + EndPos: 19, + }, + }, + { + input: `foo[4.018] @ 2.345`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "foo", + Timestamp: makeInt64Pointer(2345), + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, 
model.MetricNameLabel, "foo"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 3, + }, + }, + Range: 4*time.Second + 18*time.Millisecond, + EndPos: 18, + }, + }, { input: `test{a="b"}[5y] @ 1603774699`, expected: &MatrixSelector{ @@ -2245,7 +2335,7 @@ var testExpr = []struct { { input: `some_metric[5m] OFFSET`, fail: true, - errMsg: "unexpected end of input in offset, expected duration", + errMsg: "unexpected end of input in offset, expected integer or duration", }, { input: `some_metric OFFSET 1m[5m]`, diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index ca02b337f..b8b36d91e 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -244,10 +244,16 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) {} 76.81818181818181 +eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) + {} 76.81818181818181 + # intercept at t = 3000+3600 = 6600 eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) {} 76.81818181818181 +eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h) + {} 76.81818181818181 + # intercept at t = 600+3600 = 4200 eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) {} 51.36363636363637 From bd4ea118e9bdc41bb79c1c1967d7674389a63987 Mon Sep 17 00:00:00 2001 From: darshanime Date: Fri, 15 Mar 2024 00:18:51 +0530 Subject: [PATCH 14/81] Allow durations for number rule Signed-off-by: darshanime --- promql/parser/generated_parser.y | 22 +- promql/parser/generated_parser.y.go | 463 ++++++++++---------- promql/parser/lex.go | 2 +- promql/parser/parse_test.go | 30 +- promql/promqltest/testdata/at_modifier.test | 8 + 5 files changed, 288 insertions(+), 237 deletions(-) diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index 940526845..b99e67424 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -428,7 +428,7 @@ offset_expr: expr OFFSET number_duration_literal $$ = $1 } | expr OFFSET error - { yylex.(*parser).unexpected("offset", "integer or duration"); $$ = $1 } + { yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 } ; /* * @ modifiers. 
@@ -504,11 +504,11 @@ subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duratio | expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error { yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 } | expr LEFT_BRACKET number_duration_literal COLON error - { yylex.(*parser).unexpected("subquery selector", "number/duration or \"]\""); $$ = $1 } + { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 } | expr LEFT_BRACKET number_duration_literal error { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 } | expr LEFT_BRACKET error - { yylex.(*parser).unexpected("subquery selector", "number/duration"); $$ = $1 } + { yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 } ; /* @@ -902,7 +902,21 @@ number_duration_literal : NUMBER } ; -number : NUMBER { $$ = yylex.(*parser).number($1.Val) } ; +number : NUMBER + { + $$ = yylex.(*parser).number($1.Val) + } + | DURATION + { + var err error + var dur time.Duration + dur, err = parseDuration($1.Val) + if err != nil { + yylex.(*parser).addParseErr($1.PositionRange(), err) + } + $$ = dur.Seconds() + } + ; signed_number : ADD number { $$ = $2 } | SUB number { $$ = -$2 } diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 8b8d9f400..423082daf 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -323,198 +323,198 @@ var yyExca = [...]int16{ 78, 188, 84, 188, -2, 124, - -1, 199, - 9, 236, - 12, 236, - 13, 236, - 18, 236, - 19, 236, - 25, 236, - 40, 236, - 46, 236, - 47, 236, - 50, 236, - 56, 236, - 61, 236, - 62, 236, - 63, 236, - 64, 236, - 65, 236, - 66, 236, - 67, 236, - 68, 236, - 69, 236, - 70, 236, - 71, 236, - 72, 236, - 73, 236, - 74, 236, - 78, 236, - 82, 236, - 84, 236, - 87, 236, - 88, 236, - -2, 0, -1, 200, - 9, 236, - 12, 236, - 13, 236, - 18, 236, - 19, 236, - 25, 236, - 40, 236, - 46, 236, - 47, 236, - 50, 236, - 56, 236, - 61, 236, - 62, 236, - 63, 236, - 64, 236, - 65, 236, - 66, 236, - 67, 236, - 68, 236, - 69, 236, - 70, 236, - 71, 236, - 72, 236, - 73, 236, - 74, 236, - 78, 236, - 82, 236, - 84, 236, - 87, 236, - 88, 236, + 9, 237, + 12, 237, + 13, 237, + 18, 237, + 19, 237, + 25, 237, + 40, 237, + 46, 237, + 47, 237, + 50, 237, + 56, 237, + 61, 237, + 62, 237, + 63, 237, + 64, 237, + 65, 237, + 66, 237, + 67, 237, + 68, 237, + 69, 237, + 70, 237, + 71, 237, + 72, 237, + 73, 237, + 74, 237, + 78, 237, + 82, 237, + 84, 237, + 87, 237, + 88, 237, + -2, 0, + -1, 201, + 9, 237, + 12, 237, + 13, 237, + 18, 237, + 19, 237, + 25, 237, + 40, 237, + 46, 237, + 47, 237, + 50, 237, + 56, 237, + 61, 237, + 62, 237, + 63, 237, + 64, 237, + 65, 237, + 66, 237, + 67, 237, + 68, 237, + 69, 237, + 70, 237, + 71, 237, + 72, 237, + 73, 237, + 74, 237, + 78, 237, + 82, 237, + 84, 237, + 87, 237, + 88, 237, -2, 0, } const yyPrivate = 57344 -const yyLast = 727 +const yyLast = 728 var yyAct = [...]int16{ - 155, 330, 328, 274, 335, 152, 225, 39, 191, 44, - 288, 287, 156, 118, 82, 177, 55, 106, 6, 53, - 77, 109, 56, 133, 108, 22, 54, 110, 107, 171, - 159, 197, 57, 198, 199, 200, 60, 111, 325, 151, - 324, 301, 320, 307, 265, 154, 55, 75, 128, 105, - 159, 160, 349, 18, 19, 290, 54, 20, 306, 308, - 105, 319, 159, 76, 113, 305, 114, 161, 61, 62, + 155, 331, 329, 275, 336, 152, 226, 39, 192, 44, + 289, 288, 156, 118, 82, 178, 55, 106, 6, 53, + 77, 109, 56, 133, 108, 22, 54, 110, 107, 172, + 300, 198, 57, 199, 200, 201, 60, 111, 326, 151, + 325, 302, 
321, 308, 266, 154, 55, 75, 128, 105, + 291, 300, 160, 18, 19, 309, 54, 20, 307, 218, + 105, 320, 159, 76, 113, 306, 114, 330, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, - 73, 74, 112, 160, 179, 13, 87, 89, 264, 24, - 101, 30, 104, 150, 31, 32, 115, 98, 99, 161, - 109, 101, 102, 104, 88, 195, 110, 2, 3, 4, - 5, 263, 258, 149, 84, 162, 55, 103, 159, 172, - 166, 169, 299, 181, 83, 165, 54, 257, 103, 193, - 157, 158, 184, 180, 182, 164, 183, 196, 299, 185, - 217, 194, 201, 202, 203, 204, 205, 206, 207, 208, - 209, 210, 211, 212, 213, 214, 215, 129, 348, 329, - 216, 218, 219, 55, 38, 121, 53, 77, 268, 56, - 111, 336, 22, 54, 121, 119, 266, 347, 77, 57, - 346, 176, 269, 259, 119, 7, 175, 35, 122, 260, - 160, 262, 117, 178, 75, 179, 318, 122, 270, 174, - 18, 19, 317, 267, 20, 316, 161, 187, 285, 286, - 76, 86, 289, 315, 261, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 10, - 186, 291, 13, 314, 181, 313, 24, 312, 30, 79, - 311, 31, 32, 310, 180, 182, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 309, 36, 127, 293, 126, 297, 298, 300, 296, - 302, 55, 222, 189, 295, 84, 221, 125, 303, 304, - 192, 54, 124, 292, 195, 83, 168, 159, 1, 294, - 227, 220, 50, 81, 163, 123, 49, 48, 78, 167, - 237, 55, 322, 323, 47, 46, 243, 132, 160, 327, - 321, 54, 332, 333, 334, 45, 331, 43, 130, 338, - 337, 340, 339, 170, 161, 59, 341, 342, 9, 9, - 42, 343, 8, 131, 239, 240, 37, 345, 241, 41, - 40, 51, 190, 344, 271, 85, 254, 350, 188, 228, - 230, 232, 233, 234, 242, 244, 247, 248, 249, 250, - 251, 255, 256, 223, 80, 229, 231, 235, 236, 238, - 245, 246, 120, 153, 55, 252, 253, 53, 77, 58, - 56, 226, 52, 22, 54, 116, 0, 0, 0, 0, - 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 227, 0, 0, 0, 0, 75, 0, 0, 0, 0, - 237, 18, 19, 0, 0, 20, 243, 0, 0, 0, - 224, 76, 0, 0, 0, 0, 61, 62, 63, 64, + 73, 74, 112, 161, 180, 13, 87, 89, 265, 24, + 101, 30, 104, 150, 31, 32, 115, 98, 99, 162, + 109, 101, 102, 104, 88, 349, 110, 2, 3, 4, + 5, 264, 196, 149, 111, 163, 160, 103, 337, 173, + 167, 170, 84, 182, 348, 166, 159, 347, 103, 194, + 157, 158, 83, 181, 183, 165, 184, 197, 77, 186, + 185, 195, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 129, 269, 263, + 217, 160, 219, 220, 55, 38, 35, 53, 77, 267, + 56, 159, 270, 22, 54, 121, 297, 188, 7, 259, + 57, 296, 262, 161, 319, 119, 318, 317, 271, 179, + 261, 180, 161, 260, 258, 75, 295, 84, 122, 162, + 187, 18, 19, 316, 268, 20, 315, 83, 162, 286, + 287, 76, 314, 290, 313, 81, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 0, 0, 0, 13, 239, 240, 0, 24, 241, 30, - 0, 0, 31, 32, 0, 0, 254, 105, 0, 228, - 230, 232, 233, 234, 242, 244, 247, 248, 249, 250, - 251, 255, 256, 0, 0, 229, 231, 235, 236, 238, - 245, 246, 17, 77, 89, 252, 253, 0, 22, 0, - 0, 326, 0, 0, 98, 99, 0, 0, 101, 0, - 104, 88, 276, 277, 275, 282, 284, 281, 283, 278, - 279, 280, 17, 35, 0, 0, 18, 19, 22, 0, - 20, 0, 0, 0, 0, 103, 0, 0, 0, 0, - 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, - 27, 28, 29, 33, 34, 0, 18, 19, 13, 0, - 20, 0, 24, 0, 30, 0, 0, 31, 32, 0, - 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, - 27, 28, 29, 33, 34, 105, 0, 0, 13, 0, - 0, 0, 24, 173, 30, 0, 0, 31, 32, 0, - 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, - 0, 87, 89, 90, 0, 91, 92, 93, 94, 95, - 96, 97, 98, 99, 100, 0, 101, 102, 104, 88, - 87, 89, 90, 0, 91, 92, 93, 94, 95, 96, - 97, 98, 99, 100, 273, 101, 102, 104, 88, 105, - 0, 272, 0, 103, 0, 276, 277, 275, 282, 284, - 281, 283, 278, 279, 280, 0, 0, 0, 105, 0, - 0, 0, 103, 0, 0, 87, 89, 90, 0, 91, - 92, 93, 0, 95, 96, 97, 98, 99, 100, 0, - 101, 
102, 104, 88, 87, 89, 90, 0, 91, 92, - 0, 0, 95, 96, 0, 98, 99, 100, 0, 101, - 102, 104, 88, 0, 0, 0, 0, 103, 0, 0, + 182, 86, 292, 13, 55, 10, 312, 24, 311, 30, + 181, 183, 31, 32, 54, 79, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 310, 127, 36, 126, 1, 121, 298, 299, 301, + 164, 303, 49, 48, 190, 294, 119, 55, 160, 304, + 305, 193, 55, 160, 117, 196, 223, 54, 159, 122, + 222, 228, 54, 159, 293, 350, 50, 47, 46, 169, + 132, 238, 78, 323, 324, 221, 45, 244, 43, 161, + 328, 322, 168, 333, 334, 335, 130, 332, 171, 177, + 339, 338, 341, 340, 176, 162, 125, 342, 343, 42, + 59, 124, 344, 9, 9, 240, 241, 175, 346, 242, + 131, 8, 41, 40, 123, 37, 51, 255, 351, 191, + 229, 231, 233, 234, 235, 243, 245, 248, 249, 250, + 251, 252, 256, 257, 345, 272, 230, 232, 236, 237, + 239, 246, 247, 85, 189, 55, 253, 254, 53, 77, + 224, 56, 80, 120, 22, 54, 153, 58, 227, 52, + 116, 57, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 75, 0, 0, 0, + 0, 238, 18, 19, 0, 0, 20, 244, 0, 0, + 0, 225, 76, 0, 0, 0, 0, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 13, 240, 241, 0, 24, 242, + 30, 0, 0, 31, 32, 0, 0, 255, 105, 0, + 229, 231, 233, 234, 235, 243, 245, 248, 249, 250, + 251, 252, 256, 257, 0, 0, 230, 232, 236, 237, + 239, 246, 247, 17, 77, 89, 253, 254, 0, 22, + 0, 0, 327, 0, 0, 98, 99, 0, 0, 101, + 0, 104, 88, 277, 278, 276, 283, 285, 282, 284, + 279, 280, 281, 17, 35, 0, 0, 18, 19, 22, + 0, 20, 0, 0, 0, 0, 103, 0, 0, 0, + 0, 0, 11, 12, 14, 15, 16, 21, 23, 25, + 26, 27, 28, 29, 33, 34, 0, 18, 19, 13, + 0, 20, 0, 24, 0, 30, 0, 0, 31, 32, + 0, 0, 11, 12, 14, 15, 16, 21, 23, 25, + 26, 27, 28, 29, 33, 34, 105, 0, 0, 13, + 0, 0, 0, 24, 174, 30, 0, 0, 31, 32, + 0, 0, 0, 0, 0, 105, 0, 0, 0, 0, + 0, 0, 87, 89, 90, 0, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, + 88, 87, 89, 90, 0, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 274, 101, 102, 104, 88, + 105, 0, 273, 0, 103, 0, 277, 278, 276, 283, + 285, 282, 284, 279, 280, 281, 0, 0, 0, 105, + 0, 0, 0, 103, 0, 0, 87, 89, 90, 0, + 91, 92, 93, 0, 95, 96, 97, 98, 99, 100, + 0, 101, 102, 104, 88, 87, 89, 90, 0, 91, + 92, 0, 0, 95, 96, 0, 98, 99, 100, 0, + 101, 102, 104, 88, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 103, + 0, 0, 0, 0, 0, 0, 0, 103, } var yyPact = [...]int16{ - 16, 175, 500, 500, 154, 470, -1000, -1000, -1000, 174, + 16, 168, 501, 501, 155, 471, -1000, -1000, -1000, 153, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 273, -1000, 209, -1000, 580, + -1000, -1000, -1000, -1000, -1000, 195, -1000, 229, -1000, 581, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 22, 155, -1000, -1000, 365, -1000, 365, 165, + -1000, -1000, 22, 99, -1000, -1000, 366, -1000, 366, 125, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 172, -1000, -1000, - 275, -1000, -1000, 261, -1000, 24, -1000, -54, -54, -54, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 264, -1000, -1000, + 324, -1000, -1000, 260, -1000, 24, -1000, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, -54, - -54, -54, -54, 37, 43, 292, 155, -57, -1000, 284, - 284, 7, -1000, 561, 35, -1000, 179, -1000, -1000, 191, - 80, -1000, -1000, -1000, 112, -1000, 205, -1000, 268, 365, - -1000, -50, -45, -1000, 365, 365, 365, 365, 365, 365, - 365, 365, 365, 365, 365, 365, 365, 365, 
365, -1000, - 107, -1000, -1000, 125, -1000, -1000, -1000, -1000, -1000, -1000, - 99, 99, 270, -1000, -1000, -1000, -1000, 398, -1000, -1000, - 105, -1000, 580, -1000, -1000, 163, -1000, 189, -1000, -1000, - -1000, -1000, -1000, 86, -1000, -1000, -1000, -1000, -1000, 18, - 150, 142, -1000, -1000, -1000, 617, 443, 284, 284, 284, - 284, 35, 35, 46, 46, 46, 644, 625, 46, 46, - 644, 35, 35, 46, 35, 443, -1000, 33, -1000, -1000, - -1000, 262, -1000, 267, -1000, -1000, -1000, -1000, -1000, -1000, + -54, -54, -54, 37, 43, 268, 99, -57, -1000, 297, + 297, 7, -1000, 562, 35, -1000, 317, -1000, -1000, 187, + 80, -1000, -1000, -1000, 120, -1000, 175, -1000, 269, 366, + -1000, -50, -45, -1000, 366, 366, 366, 366, 366, 366, + 366, 366, 366, 366, 366, 366, 366, 366, 366, -1000, + 225, -1000, -1000, 44, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 107, 107, 284, -1000, -1000, -1000, -1000, 399, -1000, + -1000, 172, -1000, 581, -1000, -1000, 173, -1000, 157, -1000, + -1000, -1000, -1000, -1000, 86, -1000, -1000, -1000, -1000, -1000, + 18, 143, 132, -1000, -1000, -1000, 618, 444, 297, 297, + 297, 297, 35, 35, 46, 46, 46, 645, 626, 46, + 46, 645, 35, 35, 46, 35, 444, -1000, 28, -1000, + -1000, -1000, 273, -1000, 174, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 365, -1000, - -1000, -1000, -1000, -1000, -1000, 119, 119, 15, 119, 89, - 89, 41, 42, -1000, -1000, 255, 237, 234, 231, 229, - 227, 207, 199, 196, 190, -1000, -1000, -1000, -1000, -1000, - -1000, 40, -1000, -1000, -1000, 288, -1000, 580, -1000, -1000, - -1000, 119, -1000, 14, 12, 474, -1000, -1000, -1000, 103, - 11, 99, 99, 99, 157, 157, 103, 157, 103, -1000, - -1000, -1000, -1000, -1000, 119, 119, -1000, -1000, -1000, 119, - -1000, -1000, -1000, -1000, -1000, -1000, 99, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 156, -1000, 31, -1000, -1000, -1000, - -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 366, + -1000, -1000, -1000, -1000, -1000, -1000, 32, 32, 15, 32, + 96, 96, 41, 38, -1000, -1000, 255, 232, 230, 208, + 206, 200, 197, 181, 180, 178, -1000, -1000, -1000, -1000, + -1000, -1000, 40, -1000, -1000, -1000, 289, -1000, 581, -1000, + -1000, -1000, 32, -1000, 14, 12, 475, -1000, -1000, -1000, + 11, 152, 107, 107, 107, 104, 104, 11, 104, 11, + -1000, -1000, -1000, -1000, -1000, 32, 32, -1000, -1000, -1000, + 32, -1000, -1000, -1000, -1000, -1000, -1000, 107, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 103, -1000, 274, -1000, -1000, + -1000, -1000, } var yyPgo = [...]int16{ - 0, 385, 13, 382, 6, 15, 381, 325, 379, 373, - 372, 229, 332, 364, 14, 363, 10, 11, 348, 345, - 8, 344, 3, 4, 343, 2, 1, 0, 342, 12, - 5, 341, 340, 17, 157, 339, 333, 7, 330, 323, - 28, 318, 36, 317, 9, 315, 307, 305, 304, 297, - 296, 292, 288, 262, + 0, 390, 13, 389, 6, 15, 388, 330, 387, 386, + 383, 235, 341, 382, 14, 380, 10, 11, 374, 373, + 8, 365, 3, 4, 364, 2, 1, 0, 349, 12, + 5, 346, 343, 17, 157, 342, 340, 7, 329, 318, + 28, 316, 36, 308, 9, 306, 300, 298, 297, 273, + 272, 296, 265, 263, } var yyR1 = [...]int8{ @@ -540,8 +540,8 @@ var yyR1 = [...]int8{ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, - 5, 5, 5, 5, 44, 44, 27, 29, 29, 30, - 30, 26, 25, 25, 48, 10, 17, 17, + 5, 5, 5, 5, 44, 44, 27, 27, 29, 29, + 30, 30, 26, 25, 25, 48, 10, 17, 17, } var yyR2 = [...]int8{ @@ -567,8 +567,8 @@ var yyR2 = 
[...]int8{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, - 1, 1, 2, 1, 1, 1, 0, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 1, 1, 1, 0, 1, } var yyChk = [...]int16{ @@ -588,26 +588,26 @@ var yyChk = [...]int16{ -41, -36, -46, 77, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -34, -44, 56, 2, -30, -9, 2, -27, -29, 87, 88, 19, - 40, 56, -44, 2, -40, -33, -16, 15, 2, -16, - -39, 22, -37, 22, 20, 7, 2, -5, 2, 4, - 53, 43, 54, -5, 20, -14, 25, 2, -18, 5, - -28, -20, 12, -27, -29, 16, -37, 81, 83, 79, - 80, -37, -37, -37, -37, -37, -37, -37, -37, -37, - -37, -37, -37, -37, -37, -37, -44, 15, -27, -27, - 21, 6, 2, -15, 22, -4, -6, 2, 61, 77, - 62, 78, 63, 64, 65, 79, 80, 12, 81, 46, - 47, 50, 66, 18, 67, 82, 83, 68, 69, 70, - 71, 72, 87, 88, 58, 73, 74, 22, 7, 20, - -2, 25, 2, 25, 2, 26, 26, -29, 26, 40, - 56, -21, 24, 17, -22, 30, 28, 29, 35, 36, - 37, 33, 31, 34, 32, -16, -16, -17, -16, -17, - 22, -44, 21, 2, 22, 7, 2, -37, -26, 19, - -26, 26, -26, -20, -20, 24, 17, 2, 17, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 21, - 2, 22, -4, -26, 26, 26, 17, -22, -25, 56, - -26, -30, -27, -27, -27, -23, 14, -23, -25, -23, - -25, -26, -26, -26, -24, -27, 24, 21, 2, 21, - -27, + 9, 40, 56, -44, 2, -40, -33, -16, 15, 2, + -16, -39, 22, -37, 22, 20, 7, 2, -5, 2, + 4, 53, 43, 54, -5, 20, -14, 25, 2, -18, + 5, -28, -20, 12, -27, -29, 16, -37, 81, 83, + 79, 80, -37, -37, -37, -37, -37, -37, -37, -37, + -37, -37, -37, -37, -37, -37, -37, -44, 15, -27, + -27, 21, 6, 2, -15, 22, -4, -6, 2, 61, + 77, 62, 78, 63, 64, 65, 79, 80, 12, 81, + 46, 47, 50, 66, 18, 67, 82, 83, 68, 69, + 70, 71, 72, 87, 88, 58, 73, 74, 22, 7, + 20, -2, 25, 2, 25, 2, 26, 26, -29, 26, + 40, 56, -21, 24, 17, -22, 30, 28, 29, 35, + 36, 37, 33, 31, 34, 32, -16, -16, -17, -16, + -17, 22, -44, 21, 2, 22, 7, 2, -37, -26, + 19, -26, 26, -26, -20, -20, 24, 17, 2, 17, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 21, 2, 22, -4, -26, 26, 26, 17, -22, -25, + 56, -26, -30, -27, -27, -27, -23, 14, -23, -25, + -23, -25, -26, -26, -26, -24, -27, 24, 21, 2, + 21, -27, } var yyDef = [...]int16{ @@ -616,37 +616,37 @@ var yyDef = [...]int16{ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 107, 224, 225, 0, 234, 0, 84, + 18, 19, 0, 107, 224, 225, 0, 235, 0, 84, 85, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 218, 219, 0, 5, 99, 0, 127, 130, 0, 135, 136, 140, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, 0, 0, 60, 0, 82, 83, 0, 88, 90, 0, - 94, 98, 235, 125, 0, 131, 0, 134, 139, 0, + 94, 98, 236, 125, 0, 131, 0, 134, 139, 0, 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, - 0, 69, 70, 0, 72, 229, 230, 73, 74, 226, - 0, 0, 0, 81, 20, 21, 24, 0, 54, 25, - 0, 62, 64, 66, 86, 0, 91, 0, 97, 220, - 221, 222, 223, 0, 126, 129, 132, 133, 138, 141, - 143, 146, 150, 151, 152, 0, 26, 0, 0, -2, - -2, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 68, 0, 227, 228, - 75, 0, 80, 0, 53, 56, 58, 59, 189, 190, - 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, - 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, - 211, 212, 213, 214, 215, 216, 217, 61, 65, 87, - 89, 92, 96, 93, 95, 0, 0, 0, 0, 0, - 0, 0, 0, 156, 158, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 45, 46, 49, 237, 50, - 71, 0, 77, 79, 51, 0, 57, 63, 142, 231, - 144, 0, 147, 0, 0, 0, 154, 159, 155, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, - 78, 52, 55, 145, 0, 0, 153, 157, 160, 
0, - 233, 161, 162, 163, 164, 165, 0, 166, 167, 168, - 169, 148, 149, 232, 0, 173, 0, 171, 174, 170, - 172, + 0, 69, 70, 0, 72, 230, 231, 73, 74, 226, + 227, 0, 0, 0, 81, 20, 21, 24, 0, 54, + 25, 0, 62, 64, 66, 86, 0, 91, 0, 97, + 220, 221, 222, 223, 0, 126, 129, 132, 133, 138, + 141, 143, 146, 150, 151, 152, 0, 26, 0, 0, + -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 68, 0, 228, + 229, 75, 0, 80, 0, 53, 56, 58, 59, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 61, 65, + 87, 89, 92, 96, 93, 95, 0, 0, 0, 0, + 0, 0, 0, 0, 156, 158, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 45, 46, 49, 238, + 50, 71, 0, 77, 79, 51, 0, 57, 63, 142, + 232, 144, 0, 147, 0, 0, 0, 154, 159, 155, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 76, 78, 52, 55, 145, 0, 0, 153, 157, 160, + 0, 234, 161, 162, 163, 164, 165, 0, 166, 167, + 168, 169, 148, 149, 233, 0, 173, 0, 171, 174, + 170, 172, } var yyTok1 = [...]int8{ @@ -1308,7 +1308,7 @@ yydefault: case 69: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("offset", "integer or duration") + yylex.(*parser).unexpected("offset", "number or duration") yyVAL.node = yyDollar[1].node } case 70: @@ -1386,7 +1386,7 @@ yydefault: case 79: yyDollar = yyS[yypt-5 : yypt+1] { - yylex.(*parser).unexpected("subquery selector", "number/duration or \"]\"") + yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\"") yyVAL.node = yyDollar[1].node } case 80: @@ -1398,7 +1398,7 @@ yydefault: case 81: yyDollar = yyS[yypt-3 : yypt+1] { - yylex.(*parser).unexpected("subquery selector", "number/duration") + yylex.(*parser).unexpected("subquery selector", "number or duration") yyVAL.node = yyDollar[1].node } case 82: @@ -1847,16 +1847,27 @@ yydefault: yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } case 227: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-1 : yypt+1] { - yyVAL.float = yyDollar[2].float + var err error + var dur time.Duration + dur, err = parseDuration(yyDollar[1].item.Val) + if err != nil { + yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) + } + yyVAL.float = dur.Seconds() } case 228: + yyDollar = yyS[yypt-2 : yypt+1] + { + yyVAL.float = yyDollar[2].float + } + case 229: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 231: + case 232: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1865,17 +1876,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 232: + case 233: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 233: + case 234: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 234: + case 235: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1883,7 +1894,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 235: + case 236: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1892,7 +1903,7 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 236: + case 237: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 0e06ca525..18abd49ea 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -969,7 +969,7 @@ func (l *Lexer) scanNumber() bool { // Handle digits at the end since we already consumed before this loop. 
l.acceptRun(digitPattern) } - // empty string is not a valid number + // Empty string is not a valid number. if l.pos == initialPos { return false } diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 8ef5775a6..35c5f54a1 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2278,6 +2278,24 @@ var testExpr = []struct { EndPos: 7, }, }, + { + input: `some_metric[5m] @ 1m`, + expected: &MatrixSelector{ + VectorSelector: &VectorSelector{ + Name: "some_metric", + Timestamp: makeInt64Pointer(60000), + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"), + }, + PosRange: posrange.PositionRange{ + Start: 0, + End: 11, + }, + }, + Range: 5 * time.Minute, + EndPos: 20, + }, + }, { input: `foo[5mm]`, fail: true, @@ -2335,18 +2353,13 @@ var testExpr = []struct { { input: `some_metric[5m] OFFSET`, fail: true, - errMsg: "unexpected end of input in offset, expected integer or duration", + errMsg: "unexpected end of input in offset, expected number or duration", }, { input: `some_metric OFFSET 1m[5m]`, fail: true, errMsg: "1:22: parse error: no offset modifiers allowed before range", }, - { - input: `some_metric[5m] @ 1m`, - fail: true, - errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp", - }, { input: `some_metric[5m] @`, fail: true, @@ -3031,6 +3044,11 @@ var testExpr = []struct { errMsg: "illegal character U+002E '.' in escape sequence", }, // Subquery. + { + input: `foo{bar="baz"}[`, + fail: true, + errMsg: `1:16: parse error: bad number or duration syntax: ""`, + }, { input: `foo{bar="baz"}[10m:6s]`, expected: &SubqueryExpr{ diff --git a/promql/promqltest/testdata/at_modifier.test b/promql/promqltest/testdata/at_modifier.test index e11640ae0..35f90ee67 100644 --- a/promql/promqltest/testdata/at_modifier.test +++ b/promql/promqltest/testdata/at_modifier.test @@ -10,6 +10,14 @@ eval instant at 10s metric @ 100 metric{job="1"} 10 metric{job="2"} 20 +eval instant at 10s metric @ 100s + metric{job="1"} 10 + metric{job="2"} 20 + +eval instant at 10s metric @ 1m40s + metric{job="1"} 10 + metric{job="2"} 20 + eval instant at 10s metric @ 100 offset 50s metric{job="1"} 5 metric{job="2"} 10 From 1a1b09fc3d53ed75fce8fc2becbc288bfde35e85 Mon Sep 17 00:00:00 2001 From: darshanime Date: Fri, 19 Apr 2024 18:52:50 +0530 Subject: [PATCH 15/81] Add a note about the new syntax Signed-off-by: darshanime --- docs/querying/basics.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index fee7e63c4..b7584adba 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -81,6 +81,16 @@ Examples: 0x8f -Inf NaN + + +As of version 2.53, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change. + +Examples: + + 1s # Equivalent to 1.0 + 2m # Equivalent to 120.0 + 1ms # Equivalent to 0.001 + ## Time series selectors @@ -224,6 +234,15 @@ Here are some examples of valid time durations: 5m 10s + +As of version 2.53, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change. 
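+
+For instance, used in a range selector, the two expressions below select the
+same window (`some_metric` is an illustrative placeholder, not a real metric):
+
+    some_metric[90]
+    some_metric[1m30s]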
+ +Examples: + + 1.0 # Equivalent to 1s + 0.001 # Equivalent to 1ms + 120 # Equivalent to 2m + ### Offset modifier The `offset` modifier allows changing the time offset for individual From 706e19912bcea855e398def9c3b6b9c102089345 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 10 Jul 2024 16:59:33 +0200 Subject: [PATCH 16/81] adjust version number in documentation Signed-off-by: beorn7 --- docs/querying/basics.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/querying/basics.md b/docs/querying/basics.md index b7584adba..1c72adb3e 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -83,7 +83,7 @@ Examples: NaN -As of version 2.53, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change. +As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change. Examples: @@ -235,7 +235,7 @@ Here are some examples of valid time durations: 10s -As of version 2.53, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change. +As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change. Examples: From dd48eab9810298ab58b2b358c5a9165177be435d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 15:35:14 +0000 Subject: [PATCH 17/81] build(deps): bump @codemirror/autocomplete in /web/ui Bumps [@codemirror/autocomplete](https://github.com/codemirror/autocomplete) from 6.16.2 to 6.17.0. - [Changelog](https://github.com/codemirror/autocomplete/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/autocomplete/compare/6.16.2...6.17.0) --- updated-dependencies: - dependency-name: "@codemirror/autocomplete" dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 10 +++++----- web/ui/react-app/package.json | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 538cf26f6..556117ca8 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -33,7 +33,7 @@ "lru-cache": "^7.18.3" }, "devDependencies": { - "@codemirror/autocomplete": "^6.16.2", + "@codemirror/autocomplete": "^6.17.0", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.0", "@codemirror/state": "^6.3.3", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index ca1bed9ac..6caf7c3ed 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -37,7 +37,7 @@ "lru-cache": "^7.18.3" }, "devDependencies": { - "@codemirror/autocomplete": "^6.16.2", + "@codemirror/autocomplete": "^6.17.0", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.0", "@codemirror/state": "^6.3.3", @@ -2027,9 +2027,9 @@ "license": "MIT" }, "node_modules/@codemirror/autocomplete": { - "version": "6.16.2", - "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.16.2.tgz", - "integrity": "sha512-MjfDrHy0gHKlPWsvSsikhO1+BOh+eBHNgfH1OXs1+DAf30IonQldgMM3kxLDTG9ktE7kDLaA1j/l7KMPA4KNfw==", + "version": "6.17.0", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz", + "integrity": "sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==", "dependencies": { "@codemirror/language": "^6.0.0", "@codemirror/state": "^6.0.0", @@ -19333,7 +19333,7 @@ "name": "@prometheus-io/app", "version": "0.53.1", "dependencies": { - "@codemirror/autocomplete": "^6.16.2", + "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.0", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index f7b5d9d4d..79d803ca7 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -3,7 +3,7 @@ "version": "0.53.1", "private": true, "dependencies": { - "@codemirror/autocomplete": "^6.16.2", + "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.0", From dca82e0805d075c024a738c5aa207865365c297a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 15:35:15 +0000 Subject: [PATCH 18/81] build(deps-dev): bump ts-jest from 29.1.4 to 29.2.1 in /web/ui Bumps [ts-jest](https://github.com/kulshekhar/ts-jest) from 29.1.4 to 29.2.1. - [Release notes](https://github.com/kulshekhar/ts-jest/releases) - [Changelog](https://github.com/kulshekhar/ts-jest/blob/main/CHANGELOG.md) - [Commits](https://github.com/kulshekhar/ts-jest/compare/v29.1.4...v29.2.1) --- updated-dependencies: - dependency-name: ts-jest dependency-type: direct:development update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot]
---
 web/ui/package-lock.json | 9 +++++----
 web/ui/package.json | 2 +-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index ca1bed9ac..50ca78b9e 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -21,7 +21,7 @@
         "jest-fetch-mock": "^3.0.3",
         "prettier": "^2.8.8",
         "react-scripts": "^5.0.1",
-        "ts-jest": "^29.1.4",
+        "ts-jest": "^29.2.1",
         "typescript": "^4.9.5"
       },
       "engines": {
@@ -18028,12 +18028,13 @@
       "license": "MIT"
     },
     "node_modules/ts-jest": {
-      "version": "29.1.4",
-      "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.4.tgz",
-      "integrity": "sha512-YiHwDhSvCiItoAgsKtoLFCuakDzDsJ1DLDnSouTaTmdOcOwIkSzbLXduaQ6M5DRVhuZC/NYaaZ/mtHbWMv/S6Q==",
+      "version": "29.2.1",
+      "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.1.tgz",
+      "integrity": "sha512-7obwtH5gw0b0XZi0wmprCSvGSvHliMBI47lPnU47vmbxWS6B+v1X94yWFo1f1vt9k/he+gttsrXjkxmgY41XNQ==",
       "dev": true,
       "dependencies": {
         "bs-logger": "0.x",
+        "ejs": "^3.0.0",
         "fast-json-stable-stringify": "2.x",
         "jest-util": "^29.0.0",
         "json5": "^2.2.3",
diff --git a/web/ui/package.json b/web/ui/package.json
index 487a682ae..87cdd920d 100644
--- a/web/ui/package.json
+++ b/web/ui/package.json
@@ -25,7 +25,7 @@
     "jest-fetch-mock": "^3.0.3",
     "prettier": "^2.8.8",
     "react-scripts": "^5.0.1",
-    "ts-jest": "^29.1.4",
+    "ts-jest": "^29.2.1",
     "typescript": "^4.9.5"
   },
   "version": "0.53.1"

From cb8b6d75049faa08203cfa2b7326b3d9391f7872 Mon Sep 17 00:00:00 2001
From: Filip Petkovski
Date: Thu, 11 Jul 2024 14:16:43 +0200
Subject: [PATCH 19/81] Allow built-in tests to be customized

The RunBuiltinTests function accepts a concrete type which makes it hard
to exclude certain tests from the suite. It would be great if we could
skip tests which might not be critical in order to unblock updates.

By accepting an interface instead, we can inject a custom implementation
which would skip select test cases.

Signed-off-by: Filip Petkovski
---
 promql/promqltest/test.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go
index f3a773be8..83137e661 100644
--- a/promql/promqltest/test.go
+++ b/promql/promqltest/test.go
@@ -55,6 +55,11 @@ const (
 	DefaultMaxSamplesPerQuery = 10000
 )
 
+type TBRun interface {
+	testing.TB
+	Run(string, func(*testing.T)) bool
+}
+
 var testStartTime = time.Unix(0, 0).UTC()
 
 // LoadedStorage returns storage with generated data using the provided load statements.
@@ -89,7 +94,7 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
 }
 
 // RunBuiltinTests runs an acceptance test suite against the provided engine.
-func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
+func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
 	t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
 	parser.EnableExperimentalFunctions = true
 
From caa71fb3c74f2fb10bcdf2512994e3b098d91227 Mon Sep 17 00:00:00 2001
From: machine424
Date: Mon, 8 Jul 2024 18:53:17 +0200
Subject: [PATCH 20/81] chore(storage/remote): collect maxTimestamp when value is 0 as well.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change enables the PrometheusRemoteWriteBehind alert’s expression
to be evaluated even when the remote endpoint has never been reached.
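For reference, that alert compares the two "highest timestamp" metrics
roughly as follows (an illustrative sketch, not the verbatim
prometheus-mixin rule; the exact selectors and threshold may differ):

    (
      max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds[5m])
    - ignoring(remote_name, url) group_right
      max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds[5m])
    ) > 120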
As a result, PrometheusRemoteWriteBehind will fire to easily detect
configuration mistakes (such as incorrect endpoint URLs) or
unrecoverable connectivity issues.

See https://github.com/prometheus/prometheus/issues/14350 for details.

Signed-off-by: machine424
---
 storage/remote/max_timestamp.go | 6 ------
 storage/remote/queue_manager.go | 2 +-
 storage/remote/queue_manager_test.go | 2 +-
 storage/remote/write.go | 2 +-
 4 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/storage/remote/max_timestamp.go b/storage/remote/max_timestamp.go
index 3a0a6d6fd..bb67d9bb9 100644
--- a/storage/remote/max_timestamp.go
+++ b/storage/remote/max_timestamp.go
@@ -39,9 +39,3 @@ func (m *maxTimestamp) Get() float64 {
 	defer m.mtx.Unlock()
 	return m.value
 }
-
-func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
-	if m.Get() > 0 {
-		m.Gauge.Collect(c)
-	}
-}
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index fb13da70d..4bef9909c 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -232,7 +232,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
 			Namespace:   namespace,
 			Subsystem:   subsystem,
 			Name:        "queue_highest_sent_timestamp_seconds",
-			Help:        "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.",
+			Help:        "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
 			ConstLabels: constLabels,
 		}),
 	}
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 9ab563eda..5227c2d6a 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -60,7 +60,7 @@ func newHighestTimestampMetric() *maxTimestamp {
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "highest_timestamp_in_seconds",
-			Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
+			Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
 		}),
 	}
 }
diff --git a/storage/remote/write.go b/storage/remote/write.go
index cd8cd588c..81902a8f1 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -100,7 +100,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "highest_timestamp_in_seconds",
-			Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
+			Help:      "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
 		}),
 	},
 }
From ad03ede602ab1fa37d29b6a157b101657eb14ece Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=92=B8=E9=B1=BC=E6=9A=84?= <30610597+smd1121@users.noreply.github.com>
Date: Wed, 10 Jul 2024 12:44:51 +0800
Subject: [PATCH 21/81] fix markdown list
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 咸鱼暄 <30610597+smd1121@users.noreply.github.com>
---
 docs/querying/functions.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/querying/functions.md b/docs/querying/functions.md
index 9a552f697..b4b498f32 100644
--- a/docs/querying/functions.md
+++ b/docs/querying/functions.md
@@ -98,6 +98,7 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`. Special cases: + - Return an empty vector if `min > max` - Return `NaN` if `min` or `max` is `NaN` From bab098a4c1874b154702c2af661f235dbe019099 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=92=B8=E9=B1=BC=E6=9A=84?= <30610597+smd1121@users.noreply.github.com> Date: Thu, 11 Jul 2024 00:27:43 +0800 Subject: [PATCH 22/81] change all lists to bullets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 咸鱼暄 <30610597+smd1121@users.noreply.github.com> --- docs/querying/functions.md | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index b4b498f32..de65e693d 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -99,8 +99,8 @@ clamps the sample values of all elements in `v` to have a lower limit of `min` a Special cases: -- Return an empty vector if `min > max` -- Return `NaN` if `min` or `max` is `NaN` +* Return an empty vector if `min > max` +* Return `NaN` if `min` or `max` is `NaN` ## `clamp_max()` @@ -350,8 +350,8 @@ a histogram. Buckets of classic histograms are cumulative. Therefore, the following should always be the case: -- The counts in the buckets are monotonically increasing (strictly non-decreasing). -- A lack of observations between the upper limits of two consecutive buckets results in equal counts +* The counts in the buckets are monotonically increasing (strictly non-decreasing). +* A lack of observations between the upper limits of two consecutive buckets results in equal counts in those two buckets. However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets @@ -693,21 +693,21 @@ ignore histogram samples. The trigonometric functions work in radians: -- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)). -- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)). -- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)). -- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)). -- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)). -- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)). -- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)). -- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)). -- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)). -- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)). -- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)). -- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)). 
+* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
+* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
+* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
+* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
+* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
+* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
+* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
+* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
+* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
+* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
+* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
+* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
 
 The following are useful for converting between degrees and radians:
 
-- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
-- `pi()`: returns pi.
-- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
+* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
+* `pi()`: returns pi.
+* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.

From 0c87643abd84623ccd3c91bb4a57207856298dab Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Fri, 12 Jul 2024 09:11:38 +0200
Subject: [PATCH 23/81] [PRW 2.0] write_handler: Support for rc.2 spec & improved error handling for v2. (#14427)

* [PRW 2.0] write_handler: Support for rc.2 spec & improved error handling for v2.

Fixes: https://github.com/prometheus/prometheus/issues/14359

Signed-off-by: bwplotka

* Addressed Callum's comments.

Signed-off-by: bwplotka

* Added missing lock on flush.

Signed-off-by: bwplotka

* Fixed lint.

Signed-off-by: bwplotka

* Added tests.

Signed-off-by: bwplotka

* Addressed Callum's comments & updated re spec.
Signed-off-by: bwplotka * Update storage/remote/write_handler_test.go Co-authored-by: Callum Styan Signed-off-by: Bartlomiej Plotka --------- Signed-off-by: bwplotka Signed-off-by: Bartlomiej Plotka Co-authored-by: Callum Styan --- storage/remote/codec_test.go | 183 ++++++----- storage/remote/queue_manager.go | 2 + storage/remote/write_handler.go | 433 +++++++++++++++---------- storage/remote/write_handler_test.go | 458 ++++++++++++++++----------- storage/remote/write_test.go | 5 +- 5 files changed, 647 insertions(+), 434 deletions(-) diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 15f8fe132..279d10e41 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -36,48 +36,48 @@ import ( "github.com/prometheus/prometheus/util/annotations" ) -var testHistogram = histogram.Histogram{ - Schema: 2, - ZeroThreshold: 1e-128, - ZeroCount: 0, - Count: 0, - Sum: 20, - PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, - PositiveBuckets: []int64{1}, - NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, - NegativeBuckets: []int64{-1}, -} - -var writeRequestFixture = &prompb.WriteRequest{ - Timeseries: []prompb.TimeSeries{ - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar"}, - }, - Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, - Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}}, - Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))}, - }, - { - Labels: []prompb.Label{ - {Name: "__name__", Value: "test_metric1"}, - {Name: "b", Value: "c"}, - {Name: "baz", Value: "qux"}, - {Name: "d", Value: "e"}, - {Name: "foo", Value: "bar"}, - }, - Samples: []prompb.Sample{{Value: 2, Timestamp: 1}}, - Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}}, - Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))}, - }, - }, -} - var ( + testHistogram = histogram.Histogram{ + Schema: 2, + ZeroThreshold: 1e-128, + ZeroCount: 0, + Count: 0, + Sum: 20, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{-1}, + } + + writeRequestFixture = &prompb.WriteRequest{ + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar"}, + }, + Samples: []prompb.Sample{{Value: 1, Timestamp: 1}}, + Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}}, + Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))}, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "test_metric1"}, + {Name: "b", Value: "c"}, + {Name: "baz", Value: "qux"}, + {Name: "d", Value: "e"}, + {Name: "foo", Value: "bar"}, + }, + Samples: []prompb.Sample{{Value: 2, Timestamp: 2}}, + Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}}, + Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, 
testHistogram.ToFloat(nil))}, + }, + }, + } + writeV2RequestSeries1Metadata = metadata.Metadata{ Type: model.MetricTypeGauge, Help: "Test gauge for test purposes", @@ -88,43 +88,78 @@ var ( Help: "Test counter for test purposes", } - // writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation. - writeV2RequestFixture = func() *writev2.Request { - st := writev2.NewSymbolTable() - b := labels.NewScratchBuilder(0) - labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil) - exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) - exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) - return &writev2.Request{ - Timeseries: []writev2.TimeSeries{ - { - LabelsRefs: labelRefs, - Metadata: writev2.Metadata{ - Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2. - HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help), - UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit), - }, - Samples: []writev2.Sample{{Value: 1, Timestamp: 0}}, - Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))}, - }, - { - LabelsRefs: labelRefs, - Metadata: writev2.Metadata{ - Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2. - HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help), - // No unit. - }, - Samples: []writev2.Sample{{Value: 2, Timestamp: 1}}, - Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))}, + // writeV2RequestFixture represents the same request as writeRequestFixture, + // but using the v2 representation, plus includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata. + // NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed. + writeV2RequestFixture = &writev2.Request{ + Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"}, + Timeseries: []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type. + + HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help. + UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit. }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))}, }, - Symbols: st.Symbols(), - } - }() + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first. + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type. + + HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help. + // No unit. 
+ }, + Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))}, + }, + }, + } ) +func TestWriteV2RequestFixture(t *testing.T) { + // Generate dynamically writeV2RequestFixture, reusing v1 fixture elements. + st := writev2.NewSymbolTable() + b := labels.NewScratchBuilder(0) + labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil) + exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) + exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil) + expected := &writev2.Request{ + Timeseries: []writev2.TimeSeries{ + { + LabelsRefs: labelRefs, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help), + UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit), + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))}, + }, + { + LabelsRefs: labelRefs, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help), + // No unit. + }, + Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}}, + Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))}, + }, + }, + Symbols: st.Symbols(), + } + // Check if it matches static writeV2RequestFixture. 
+ require.Equal(t, expected, writeV2RequestFixture) +} + func TestValidateLabelsAndMetricName(t *testing.T) { tests := []struct { input []prompb.Label diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 4bef9909c..5bafb9da2 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1468,6 +1468,8 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) { for q.tryEnqueueingBatch(done) { time.Sleep(time.Second) } + q.batchMtx.Lock() + defer q.batchMtx.Unlock() q.batch = nil close(q.batchQueue) } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index 9997811ab..d82237371 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -19,6 +19,7 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" "time" @@ -27,6 +28,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -43,7 +45,8 @@ type writeHandler struct { logger log.Logger appendable storage.Appendable - samplesWithInvalidLabelsTotal prometheus.Counter + samplesWithInvalidLabelsTotal prometheus.Counter + samplesAppendedWithoutMetadata prometheus.Counter acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{} } @@ -52,6 +55,9 @@ const maxAheadTime = 10 * time.Minute // NewWriteHandler creates a http.Handler that accepts remote write requests with // the given message in acceptedProtoMsgs and writes them to the provided appendable. +// +// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible +// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. 
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { @@ -61,15 +67,18 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st logger: logger, appendable: appendable, acceptedProtoMsgs: protoMsgs, - samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ + samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Namespace: "prometheus", Subsystem: "api", Name: "remote_write_invalid_labels_samples_total", - Help: "The total number of remote write samples which contains invalid labels.", + Help: "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.", + }), + samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "remote_write_without_metadata_appended_samples_total", + Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.", }), - } - if reg != nil { - reg.MustRegister(h.samplesWithInvalidLabelsTotal) } return h } @@ -108,15 +117,15 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { contentType = appProtoContentType } - msg, err := h.parseProtoMsg(contentType) + msgType, err := h.parseProtoMsg(contentType) if err != nil { level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } - if _, ok := h.acceptedProtoMsgs[msg]; !ok { - err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) { + if _, ok := h.acceptedProtoMsgs[msgType]; !ok { + err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) { for k := range h.acceptedProtoMsgs { ret = append(ret, string(k)) } @@ -154,100 +163,111 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // Now we have a decompressed buffer we can unmarshal it. - switch msg { - case config.RemoteWriteProtoMsgV1: + + if msgType == config.RemoteWriteProtoMsgV1 { + // PRW 1.0 flow has different proto message and no partial write handling. var req prompb.WriteRequest if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error()) + level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } - err = h.write(r.Context(), &req) - case config.RemoteWriteProtoMsgV2: - var req writev2.Request - if err := proto.Unmarshal(decompressed, &req); err != nil { - // TODO(bwplotka): Add more context to responded error? 
- level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return + if err = h.write(r.Context(), &req); err != nil { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): + // Indicated an out-of-order sample is a bad request to prevent retries. + http.Error(w, err.Error(), http.StatusBadRequest) + return + default: + level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } } - err = h.writeV2(r.Context(), &req) + w.WriteHeader(http.StatusNoContent) + return } - switch { - case err == nil: - case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): - // Indicated an out of order sample is a bad request to prevent retries. + // Remote Write 2.x proto message handling. + var req writev2.Request + if err := proto.Unmarshal(decompressed, &req); err != nil { + // TODO(bwplotka): Add more context to responded error? + level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return - default: - level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) - http.Error(w, err.Error(), http.StatusInternalServerError) + } + + respStats, errHTTPCode, err := h.writeV2(r.Context(), &req) + + // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases. + respStats.SetResponseHeaders(w.Header()) + + if err != nil { + if errHTTPCode/5 == 100 { // 5xx + level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error()) + } + http.Error(w, err.Error(), errHTTPCode) return } - w.WriteHeader(http.StatusNoContent) } -// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause. 
-func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error { - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - switch { - case errors.Is(unwrappedErr, storage.ErrNotFound): - return storage.ErrNotFound - case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar): - *outOfOrderErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) - return nil - default: - return err - } -} - func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { outOfOrderExemplarErrs := 0 samplesWithInvalidLabels := 0 + samplesAppended := 0 - timeLimitApp := &timeLimitAppender{ + app := &timeLimitAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } defer func() { if err != nil { - _ = timeLimitApp.Rollback() + _ = app.Rollback() return } - err = timeLimitApp.Commit() + err = app.Commit() + if err != nil { + h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended)) + } }() b := labels.NewScratchBuilder(0) for _, ts := range req.Timeseries { ls := ts.ToLabels(&b, nil) - if !ls.IsValid() { + if !ls.Has(labels.MetricName) || !ls.IsValid() { level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ + // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are + // potentially written. Perhaps unify with fixed writeV2 implementation a bit. continue } - err := h.appendSamples(timeLimitApp, ts.Samples, ls) - if err != nil { + if err := h.appendV1Samples(app, ts.Samples, ls); err != nil { return err } + samplesAppended += len(ts.Samples) for _, ep := range ts.Exemplars { e := ep.ToExemplar(&b, nil) - h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs) + if _, err := app.AppendExemplar(0, ls, e); err != nil { + switch { + case errors.Is(err, storage.ErrOutOfOrderExemplar): + outOfOrderExemplarErrs++ + level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + default: + // Since exemplar storage is still experimental, we don't fail the request on ingestion errors + level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + } + } } - err = h.appendHistograms(timeLimitApp, ts.Histograms, ls) - if err != nil { + if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil { return err } + samplesAppended += len(ts.Histograms) } if outOfOrderExemplarErrs > 0 { @@ -256,151 +276,216 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err if samplesWithInvalidLabels > 0 { h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) } - return nil } -func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) { - outOfOrderExemplarErrs := 0 +func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error { + var ref storage.SeriesRef + var err error + for _, s := range ss { + ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue()) + if err != nil { + if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { + level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", 
s.Timestamp) + } + return err + } + } + return nil +} - timeLimitApp := &timeLimitAppender{ +func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error { + var err error + for _, hp := range hh { + if hp.IsFloatHistogram() { + _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram()) + } else { + _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil) + } + if err != nil { + // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is + // a note indicating its inclusion in the future. + if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { + level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) + } + return err + } + } + return nil +} + +const ( + prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples" + rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms" + rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars" +) + +type responseStats struct { + samples int + histograms int + exemplars int +} + +func (s responseStats) SetResponseHeaders(h http.Header) { + h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples)) + h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms)) + h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars)) +} + +// writeV2 is similar to write, but it works with v2 proto message, +// allows partial 4xx writes and gathers statistics. +// +// writeV2 returns the statistics. +// In error cases, writeV2, also returns statistics, but also the error that +// should be propagated to the remote write sender and httpCode to use for status. +// +// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors. +// Once we have 5xx type of error, we immediately stop and rollback all appends. +func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) { + app := &timeLimitAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } - defer func() { - if err != nil { - _ = timeLimitApp.Rollback() - return + rs := responseStats{} + samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs) + if err != nil { + if errHTTPCode/5 == 100 { + // On 5xx, we always rollback, because we expect + // sender to retry and TSDB is not idempotent. + if rerr := app.Rollback(); rerr != nil { + level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) + } + return responseStats{}, errHTTPCode, err } - err = timeLimitApp.Commit() - }() - b := labels.NewScratchBuilder(0) + // Non-retriable (e.g. bad request error case). Can be partially written. + commitErr := app.Commit() + if commitErr != nil { + // Bad requests does not matter as we have internal error (retryable). + return responseStats{}, http.StatusInternalServerError, commitErr + } + // Bad request error happened, but rest of data (if any) was written. + h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) + return rs, errHTTPCode, err + } + + // All good just commit. 
+ if err := app.Commit(); err != nil { + return responseStats{}, http.StatusInternalServerError, err + } + h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) + return rs, 0, nil +} + +func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) { + var ( + badRequestErrs []error + outOfOrderExemplarErrs, samplesWithInvalidLabels int + + b = labels.NewScratchBuilder(0) + ) for _, ts := range req.Timeseries { ls := ts.ToLabels(&b, req.Symbols) - - err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls) - if err != nil { - return err + // Validate series labels early. + // NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose + // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case. + if !ls.Has(labels.MetricName) || !ls.IsValid() { + badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String())) + samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) + continue } + allSamplesSoFar := rs.samples + rs.histograms + var ref storage.SeriesRef + + // Samples. + for _, s := range ts.Samples { + ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) + if err == nil { + rs.samples++ + continue + } + // Handle append error. + if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) || + errors.Is(err, storage.ErrTooOldSample) { + // TODO(bwplotka): Not too spammy log? + level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) + badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) + continue + } + return 0, http.StatusInternalServerError, err + } + + // Native Histograms. + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram()) + } else { + ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil) + } + if err == nil { + rs.histograms++ + continue + } + // Handle append error. + // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is + // a note indicating its inclusion in the future. + if errors.Is(err, storage.ErrOutOfOrderSample) || + errors.Is(err, storage.ErrOutOfBounds) || + errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { + // TODO(bwplotka): Not too spammy log? + level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) + badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) + continue + } + return 0, http.StatusInternalServerError, err + } + + // Exemplars. for _, ep := range ts.Exemplars { e := ep.ToExemplar(&b, req.Symbols) - h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs) - } - - err = h.appendHistogramsV2(timeLimitApp, ts.Histograms, ls) - if err != nil { - return err + ref, err = app.AppendExemplar(ref, ls, e) + if err == nil { + rs.exemplars++ + continue + } + // Handle append error. + // TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms. + // Since exemplar storage is still experimental, we don't fail in anyway, the request on ingestion errors. 
+ if errors.Is(err, storage.ErrOutOfOrderExemplar) { + outOfOrderExemplarErrs++ + level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + continue + } + level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } m := ts.ToMetadata(req.Symbols) - if _, err = timeLimitApp.UpdateMetadata(0, ls, m); err != nil { + if _, err = app.UpdateMetadata(ref, ls, m); err != nil { level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) + // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, + // we don't report remote write error either. We increment metric instead. + samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar } } if outOfOrderExemplarErrs > 0 { - _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } + h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) - return nil -} - -func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) { - _, err := app.AppendExemplar(0, labels, e) - err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs) - if err != nil { - // Since exemplar storage is still experimental, we don't fail the request on ingestion errors - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err) + if len(badRequestErrs) == 0 { + return samplesWithoutMetadata, 0, nil } -} - -func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error { - var ref storage.SeriesRef - var err error - for _, s := range ss { - ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue()) - if err != nil { - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) - } - return err - } - } - return nil -} - -func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error { - var ref storage.SeriesRef - var err error - for _, s := range ss { - ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue()) - if err != nil { - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) - } - return err - } - } - return nil -} - -func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error { - var err error - for _, hp := range hh { - if hp.IsFloatHistogram() { - _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram()) - 
} else { - _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil) - } - if err != nil { - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is - // a note indicating its inclusion in the future. - if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) - } - return err - } - } - return nil -} - -func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error { - var err error - for _, hp := range hh { - if hp.IsFloatHistogram() { - _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram()) - } else { - _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil) - } - if err != nil { - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is - // a note indicating its inclusion in the future. - if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) - } - return err - } - } - return nil + // TODO(bwplotka): Better concat formatting? Perhaps add size limit? + return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...) } // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index 24bd7059a..9b5fb1a6e 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -16,6 +16,7 @@ package remote import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -27,6 +28,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -290,64 +292,224 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) { } } +func expectHeaderValue(t testing.TB, expected int, got string) { + t.Helper() + + require.NotEmpty(t, got) + i, err := strconv.Atoi(got) + require.NoError(t, err) + require.Equal(t, expected, i) +} + func TestRemoteWriteHandler_V2Message(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") - require.NoError(t, err) + // V2 supports partial writes for non-retriable errors, so test them. 
+ for _, tc := range []struct { + desc string + input []writev2.TimeSeries + expectedCode int + expectedRespBody string - req, err := http.NewRequest("", "", bytes.NewReader(payload)) - require.NoError(t, err) + commitErr error + appendSampleErr error + appendHistogramErr error + appendExemplarErr error + updateMetadataErr error + }{ + { + desc: "All timeseries accepted", + input: writeV2RequestFixture.Timeseries, + expectedCode: http.StatusNoContent, + }, + { + desc: "Partial write; first series with invalid labels (no metric name)", + input: append( + // Series with test_metric1="test_metric1" labels. + []writev2.TimeSeries{{LabelsRefs: []uint32{2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, + writeV2RequestFixture.Timeseries...), + expectedCode: http.StatusBadRequest, + expectedRespBody: "invalid metric name or labels, got {test_metric1=\"test_metric1\"}\n", + }, + { + desc: "Partial write; first series with invalid labels (empty metric name)", + input: append( + // Series with __name__="" labels. + []writev2.TimeSeries{{LabelsRefs: []uint32{1, 0}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}}, + writeV2RequestFixture.Timeseries...), + expectedCode: http.StatusBadRequest, + expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n", + }, + { + desc: "Partial write; first series with one OOO sample", + input: func() []writev2.TimeSeries { + f := proto.Clone(writeV2RequestFixture).(*writev2.Request) + f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, writev2.Sample{Value: 2, Timestamp: 0}) + return f.Timeseries + }(), + expectedCode: http.StatusBadRequest, + expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", + }, + { + desc: "Partial write; first series with one dup sample", + input: func() []writev2.TimeSeries { + f := proto.Clone(writeV2RequestFixture).(*writev2.Request) + f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, f.Timeseries[0].Samples[0]) + return f.Timeseries + }(), + expectedCode: http.StatusBadRequest, + expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", + }, + { + desc: "Partial write; first series with one OOO histogram sample", + input: func() []writev2.TimeSeries { + f := proto.Clone(writeV2RequestFixture).(*writev2.Request) + f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))) + return f.Timeseries + }(), + expectedCode: http.StatusBadRequest, + expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", + }, + { + desc: "Partial write; first series with one dup histogram sample", + input: func() []writev2.TimeSeries { + f := proto.Clone(writeV2RequestFixture).(*writev2.Request) + f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[1]) + return f.Timeseries + }(), + expectedCode: http.StatusBadRequest, + expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n", + }, + // Non retriable errors from various parts. 
+ { + desc: "Internal sample append error; rollback triggered", + input: writeV2RequestFixture.Timeseries, + appendSampleErr: errors.New("some sample internal append error"), - req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2]) - req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) - req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) + expectedCode: http.StatusInternalServerError, + expectedRespBody: "some sample internal append error\n", + }, + { + desc: "Internal histogram sample append error; rollback triggered", + input: writeV2RequestFixture.Timeseries, + appendHistogramErr: errors.New("some histogram sample internal append error"), - appendable := &mockAppendable{} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + expectedCode: http.StatusInternalServerError, + expectedRespBody: "some histogram sample internal append error\n", + }, + { + desc: "Partial write; skipped exemplar; exemplar storage errs are noop", + input: writeV2RequestFixture.Timeseries, + appendExemplarErr: errors.New("some exemplar append error"), - recorder := httptest.NewRecorder() - handler.ServeHTTP(recorder, req) + expectedCode: http.StatusNoContent, + }, + { + desc: "Partial write; skipped metadata; metadata storage errs are noop", + input: writeV2RequestFixture.Timeseries, + updateMetadataErr: errors.New("some metadata update error"), - resp := recorder.Result() - require.Equal(t, http.StatusNoContent, resp.StatusCode) + expectedCode: http.StatusNoContent, + }, + { + desc: "Internal commit error; rollback triggered", + input: writeV2RequestFixture.Timeseries, + commitErr: errors.New("storage error"), - b := labels.NewScratchBuilder(0) - i := 0 - j := 0 - k := 0 - for _, ts := range writeV2RequestFixture.Timeseries { - ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) + expectedCode: http.StatusInternalServerError, + expectedRespBody: "storage error\n", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy") + require.NoError(t, err) - for _, s := range ts.Samples { - requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) + req, err := http.NewRequest("", "", bytes.NewReader(payload)) + require.NoError(t, err) - switch i { - case 0: - requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i]) - case 1: - requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i]) - default: - t.Fatal("more series/samples then expected") + req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2]) + req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) + req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) + + appendable := &mockAppendable{ + commitErr: tc.commitErr, + appendSampleErr: tc.appendSampleErr, + appendHistogramErr: tc.appendHistogramErr, + appendExemplarErr: tc.appendExemplarErr, + updateMetadataErr: tc.updateMetadataErr, } - i++ - } - for _, e := range ts.Exemplars { - exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels - requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) - j++ - } - for _, hp := range ts.Histograms { - if hp.IsFloatHistogram() { - fh := hp.ToFloatHistogram() - requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, 
fh}, appendable.histograms[k]) + handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) + + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, req) + + resp := recorder.Result() + require.Equal(t, tc.expectedCode, resp.StatusCode) + respBody, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, tc.expectedRespBody, string(respBody)) + + if tc.expectedCode == http.StatusInternalServerError { + // We don't expect writes for partial writes with retry-able code. + expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples")) + expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms")) + expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars")) + + require.Empty(t, len(appendable.samples)) + require.Empty(t, len(appendable.histograms)) + require.Empty(t, len(appendable.exemplars)) + require.Empty(t, len(appendable.metadata)) + return + } + + // Double check mandatory 2.0 stats. + // writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each. + expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples")) + expectHeaderValue(t, 4, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms")) + if tc.appendExemplarErr != nil { + expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars")) } else { - h := hp.ToIntHistogram() - requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k]) + expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars")) } - k++ - } + + // Double check what was actually appended. + var ( + b = labels.NewScratchBuilder(0) + i, j, k, m int + ) + for _, ts := range writeV2RequestFixture.Timeseries { + ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) + + for _, s := range ts.Samples { + requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) + i++ + } + for _, hp := range ts.Histograms { + if hp.IsFloatHistogram() { + fh := hp.ToFloatHistogram() + requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k]) + } else { + h := hp.ToIntHistogram() + requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k]) + } + k++ + } + if tc.appendExemplarErr == nil { + for _, e := range ts.Exemplars { + exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels + requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j]) + j++ + } + } + if tc.updateMetadataErr == nil { + expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols) + requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m]) + m++ + } + } + }) } } +// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message. 
func TestOutOfOrderSample_V1Message(t *testing.T) { for _, tc := range []struct { Name string @@ -372,7 +534,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) { req, err := http.NewRequest("", "", bytes.NewReader(payload)) require.NoError(t, err) - appendable := &mockAppendable{latestSample: 100} + appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() @@ -384,49 +546,10 @@ func TestOutOfOrderSample_V1Message(t *testing.T) { } } -func TestOutOfOrderSample_V2Message(t *testing.T) { - for _, tc := range []struct { - Name string - Timestamp int64 - }{ - { - Name: "historic", - Timestamp: 0, - }, - { - Name: "future", - Timestamp: math.MaxInt64, - }, - } { - t.Run(tc.Name, func(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{ - LabelsRefs: []uint32{1, 2}, - Samples: []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}}, - }}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy") - require.NoError(t, err) - - req, err := http.NewRequest("", "", bytes.NewReader(payload)) - require.NoError(t, err) - - req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2]) - req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) - req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) - - appendable := &mockAppendable{latestSample: 100} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) - - recorder := httptest.NewRecorder() - handler.ServeHTTP(recorder, req) - - resp := recorder.Result() - require.Equal(t, http.StatusBadRequest, resp.StatusCode) - }) - } -} - // This test case currently aims to verify that the WriteHandler endpoint // don't fail on exemplar ingestion errors since the exemplar storage is // still experimental. +// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message. 
func TestOutOfOrderExemplar_V1Message(t *testing.T) { tests := []struct { Name string @@ -453,7 +576,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) { req, err := http.NewRequest("", "", bytes.NewReader(payload)) require.NoError(t, err) - appendable := &mockAppendable{latestExemplar: 100} + appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() @@ -466,49 +589,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) { } } -func TestOutOfOrderExemplar_V2Message(t *testing.T) { - tests := []struct { - Name string - Timestamp int64 - }{ - { - Name: "historic", - Timestamp: 0, - }, - { - Name: "future", - Timestamp: math.MaxInt64, - }, - } - - for _, tc := range tests { - t.Run(tc.Name, func(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{ - LabelsRefs: []uint32{1, 2}, - Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}}, - }}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy") - require.NoError(t, err) - - req, err := http.NewRequest("", "", bytes.NewReader(payload)) - require.NoError(t, err) - - req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2]) - req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) - req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) - - appendable := &mockAppendable{latestExemplar: 100} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) - - recorder := httptest.NewRecorder() - handler.ServeHTTP(recorder, req) - - resp := recorder.Result() - // TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental. - require.Equal(t, http.StatusNoContent, resp.StatusCode) - }) - } -} - +// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message. 
func TestOutOfOrderHistogram_V1Message(t *testing.T) { for _, tc := range []struct { Name string @@ -533,7 +614,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) { req, err := http.NewRequest("", "", bytes.NewReader(payload)) require.NoError(t, err) - appendable := &mockAppendable{latestHistogram: 100} + appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}} handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}) recorder := httptest.NewRecorder() @@ -545,46 +626,6 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) { } } -func TestOutOfOrderHistogram_V2Message(t *testing.T) { - for _, tc := range []struct { - Name string - Timestamp int64 - }{ - { - Name: "historic", - Timestamp: 0, - }, - { - Name: "future", - Timestamp: math.MaxInt64, - }, - } { - t.Run(tc.Name, func(t *testing.T) { - payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{ - LabelsRefs: []uint32{0, 1}, - Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))}, - }}, []string{"__name__", "metric1"}, nil, nil, nil, "snappy") - require.NoError(t, err) - - req, err := http.NewRequest("", "", bytes.NewReader(payload)) - require.NoError(t, err) - - req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2]) - req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) - req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) - - appendable := &mockAppendable{latestHistogram: 100} - handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) - - recorder := httptest.NewRecorder() - handler.ServeHTTP(recorder, req) - - resp := recorder.Result() - require.Equal(t, http.StatusBadRequest, resp.StatusCode) - }) - } -} - func BenchmarkRemoteWriteHandler(b *testing.B) { const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte" var reqs []*http.Request @@ -719,15 +760,20 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries { } type mockAppendable struct { - latestSample int64 + latestSample map[uint64]int64 samples []mockSample - latestExemplar int64 + latestExemplar map[uint64]int64 exemplars []mockExemplar - latestHistogram int64 + latestHistogram map[uint64]int64 histograms []mockHistogram metadata []mockMetadata - commitErr error + // optional errors to inject. 
+ commitErr error + appendSampleErr error + appendHistogramErr error + appendExemplarErr error + updateMetadataErr error } type mockSample struct { @@ -765,48 +811,92 @@ func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...inte } func (m *mockAppendable) Appender(_ context.Context) storage.Appender { + if m.latestSample == nil { + m.latestSample = map[uint64]int64{} + } + if m.latestHistogram == nil { + m.latestHistogram = map[uint64]int64{} + } + if m.latestExemplar == nil { + m.latestExemplar = map[uint64]int64{} + } return m } func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - if t < m.latestSample { - return 0, storage.ErrOutOfOrderSample + if m.appendSampleErr != nil { + return 0, m.appendSampleErr } - m.latestSample = t + latestTs := m.latestSample[l.Hash()] + if t < latestTs { + return 0, storage.ErrOutOfOrderSample + } + if t == latestTs { + return 0, storage.ErrDuplicateSampleForTimestamp + } + + m.latestSample[l.Hash()] = t m.samples = append(m.samples, mockSample{l, t, v}) return 0, nil } func (m *mockAppendable) Commit() error { + if m.commitErr != nil { + _ = m.Rollback() // As per Commit method contract. + } return m.commitErr } -func (*mockAppendable) Rollback() error { - return fmt.Errorf("not implemented") +func (m *mockAppendable) Rollback() error { + m.samples = m.samples[:0] + m.exemplars = m.exemplars[:0] + m.histograms = m.histograms[:0] + m.metadata = m.metadata[:0] + return nil } func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { - if e.Ts < m.latestExemplar { - return 0, storage.ErrOutOfOrderExemplar + if m.appendExemplarErr != nil { + return 0, m.appendExemplarErr } - m.latestExemplar = e.Ts + latestTs := m.latestExemplar[l.Hash()] + if e.Ts < latestTs { + return 0, storage.ErrOutOfOrderExemplar + } + if e.Ts == latestTs { + return 0, storage.ErrDuplicateExemplar + } + + m.latestExemplar[l.Hash()] = e.Ts m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value}) return 0, nil } func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { - if t < m.latestHistogram { - return 0, storage.ErrOutOfOrderSample + if m.appendHistogramErr != nil { + return 0, m.appendHistogramErr } - m.latestHistogram = t + latestTs := m.latestHistogram[l.Hash()] + if t < latestTs { + return 0, storage.ErrOutOfOrderSample + } + if t == latestTs { + return 0, storage.ErrDuplicateSampleForTimestamp + } + + m.latestHistogram[l.Hash()] = t m.histograms = append(m.histograms, mockHistogram{l, t, h, fh}) return 0, nil } func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { + if m.updateMetadataErr != nil { + return 0, m.updateMetadataErr + } + m.metadata = append(m.metadata, mockMetadata{l: l, m: mp}) return 0, nil } diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 648ec4b17..6e7422a58 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -369,7 +369,7 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) { } func TestOTLPWriteHandler(t *testing.T) { - exportRequest := generateOTLPWriteRequest(t) + exportRequest := generateOTLPWriteRequest() buf, err := exportRequest.MarshalProto() require.NoError(t, err) @@ -392,7 +392,7 @@ func TestOTLPWriteHandler(t *testing.T) 
{ require.Len(t, appendable.exemplars, 1) // 1 (exemplar) } -func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest { +func generateOTLPWriteRequest() pmetricotlp.ExportRequest { d := pmetric.NewMetrics() // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram @@ -422,6 +422,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest { counterDataPoint.Attributes().PutStr("foo.bar", "baz") counterExemplar := counterDataPoint.Exemplars().AppendEmpty() + counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) counterExemplar.SetDoubleValue(10.0) counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7}) From 506729480e970dfabd1ff1484127e49b4fa671e7 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Sun, 14 Jul 2024 14:02:13 +0200 Subject: [PATCH 24/81] golangci-lint: Don't use deprecated config params Replace deprecated golangci-lint configuration parameters with current equivalents: * `run.skip-files` => `issues.exclude-files` * `run.skip-dirs` => `issues.exclude-dirs` Signed-off-by: Arve Knudsen --- .golangci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 026d68a31..e924fe3d5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,12 +1,5 @@ run: timeout: 15m - skip-files: - # Skip autogenerated files. - - ^.*\.(pb|y)\.go$ - skip-dirs: - # Copied it from a different source - - storage/remote/otlptranslator/prometheusremotewrite - - storage/remote/otlptranslator/prometheus output: sort-results: true @@ -33,6 +26,13 @@ linters: issues: max-same-issues: 0 + exclude-files: + # Skip autogenerated files. + - ^.*\.(pb|y)\.go$ + exclude-dirs: + # Copied it from a different source + - storage/remote/otlptranslator/prometheusremotewrite + - storage/remote/otlptranslator/prometheus exclude-rules: - linters: - gocritic From c458cb73a4925cfcc3ea184536f30cb0e49ba60c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 14 Jul 2024 16:12:25 +0000 Subject: [PATCH 25/81] build(deps-dev): bump ts-jest from 29.2.1 to 29.2.2 in /web/ui Bumps [ts-jest](https://github.com/kulshekhar/ts-jest) from 29.2.1 to 29.2.2. - [Release notes](https://github.com/kulshekhar/ts-jest/releases) - [Changelog](https://github.com/kulshekhar/ts-jest/blob/main/CHANGELOG.md) - [Commits](https://github.com/kulshekhar/ts-jest/compare/v29.2.1...v29.2.2) --- updated-dependencies: - dependency-name: ts-jest dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- web/ui/package-lock.json | 8 ++++---- web/ui/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 50ca78b9e..3c4511eba 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -21,7 +21,7 @@ "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.2.1", + "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, "engines": { @@ -18028,9 +18028,9 @@ "license": "MIT" }, "node_modules/ts-jest": { - "version": "29.2.1", - "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.1.tgz", - "integrity": "sha512-7obwtH5gw0b0XZi0wmprCSvGSvHliMBI47lPnU47vmbxWS6B+v1X94yWFo1f1vt9k/he+gttsrXjkxmgY41XNQ==", + "version": "29.2.2", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.2.tgz", + "integrity": "sha512-sSW7OooaKT34AAngP6k1VS669a0HdLxkQZnlC7T76sckGCokXFnvJ3yRlQZGRTAoV5K19HfSgCiSwWOSIfcYlg==", "dev": true, "dependencies": { "bs-logger": "0.x", diff --git a/web/ui/package.json b/web/ui/package.json index 87cdd920d..92bfbea5e 100644 --- a/web/ui/package.json +++ b/web/ui/package.json @@ -25,7 +25,7 @@ "jest-fetch-mock": "^3.0.3", "prettier": "^2.8.8", "react-scripts": "^5.0.1", - "ts-jest": "^29.2.1", + "ts-jest": "^29.2.2", "typescript": "^4.9.5" }, "version": "0.53.1" From 18a40e31d489648f53a21ea675fa327f26f4a07b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 23:18:46 +0000 Subject: [PATCH 26/81] build(deps): bump the go-opentelemetry-io group across 1 directory with 2 updates Bumps the go-opentelemetry-io group with 2 updates in the / directory: [go.opentelemetry.io/collector/pdata](https://github.com/open-telemetry/opentelemetry-collector) and [go.opentelemetry.io/collector/semconv](https://github.com/open-telemetry/opentelemetry-collector). Updates `go.opentelemetry.io/collector/pdata` from 1.8.0 to 1.11.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/pdata/v1.8.0...pdata/v1.11.0) Updates `go.opentelemetry.io/collector/semconv` from 0.101.0 to 0.104.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-collector/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-collector/blob/main/CHANGELOG-API.md) - [Commits](https://github.com/open-telemetry/opentelemetry-collector/compare/v0.101.0...v0.104.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/collector/pdata dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io - dependency-name: go.opentelemetry.io/collector/semconv dependency-type: direct:production update-type: version-update:semver-minor dependency-group: go-opentelemetry-io ... 
Signed-off-by: dependabot[bot] Signed-off-by: Arve Knudsen --- documentation/examples/remote_storage/go.mod | 2 +- go.mod | 10 ++++++---- go.sum | 16 ++++++++-------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 2491bbe2d..348b27dfc 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -1,6 +1,6 @@ module github.com/prometheus/prometheus/documentation/examples/remote_storage -go 1.21 +go 1.21.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 diff --git a/go.mod b/go.mod index ce2f0714a..4107f3a09 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/prometheus/prometheus -go 1.21 +go 1.21.0 + +toolchain go1.22.5 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 @@ -60,8 +62,8 @@ require ( github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.8.0 - go.opentelemetry.io/collector/semconv v0.101.0 + go.opentelemetry.io/collector/pdata v1.11.0 + go.opentelemetry.io/collector/semconv v0.104.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 go.opentelemetry.io/otel v1.27.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 @@ -83,7 +85,7 @@ require ( google.golang.org/api v0.183.0 google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 google.golang.org/grpc v1.64.0 - google.golang.org/protobuf v1.34.1 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.3 diff --git a/go.sum b/go.sum index 956b9d894..dc4a7ecfd 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -723,10 +723,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764= -go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY= -go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q= -go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A= 
+go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= +go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= +go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= +go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= @@ -1119,8 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 5c2590c358311c22aa995fd524e3b5d23f51f71d Mon Sep 17 00:00:00 2001 From: B1F030 Date: Mon, 15 Jul 2024 10:54:54 +0800 Subject: [PATCH 27/81] revert main.go Signed-off-by: B1F030 --- cmd/prometheus/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index 787a28992..1d844ddba 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -343,7 +343,7 @@ func main() { a.Flag("web.page-title", "Document title of Prometheus instance."). Default("Prometheus Time Series Collection and Processing Server").StringVar(&cfg.web.PageTitle) - a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com'`). + a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com'`). Default(".*").StringVar(&cfg.corsRegexString) serverOnlyFlag(a, "storage.tsdb.path", "Base path for metrics storage."). 
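The three commits around this point (patches 27–29) form one logical change: patch 27 above reverts the hand-escaped `\|` inside the CORS flag's help string, and the two patches that follow move the escaping into the CLI documentation generator instead, so the help text stays readable in `--help` output while the generated markdown flag table still gets its pipes escaped. A minimal sketch of the approach they settle on (the function name is hypothetical; the `strings.ReplaceAll` call with a raw string literal is taken from the patches):

```go
package main

import (
	"fmt"
	"strings"
)

// escapeForMarkdownTable escapes pipe characters so that a flag's help text
// cannot terminate a cell of the generated markdown flag table.
func escapeForMarkdownTable(help string) string {
	// Note the raw string literal `\|`: in an interpreted string literal,
	// "\|" is an invalid escape sequence and fails to compile, which is
	// likely why the follow-up "fix fmt" commit below switches to backticks.
	return strings.ReplaceAll(help, "|", `\|`)
}

func main() {
	fmt.Println(escapeForMarkdownTable(`Regex for CORS origin. Example: 'https?://(domain1|domain2)\.com'`))
	// Prints: Regex for CORS origin. Example: 'https?://(domain1\|domain2)\.com'
}
```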
From 15ac95ddd07b607914fb7d04c26f2231aadf25b5 Mon Sep 17 00:00:00 2001 From: B1F030 Date: Mon, 15 Jul 2024 11:33:58 +0800 Subject: [PATCH 28/81] change documentcli Signed-off-by: B1F030 --- util/documentcli/documentcli.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index 720a7c9c7..b3f143cf6 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -75,7 +75,7 @@ func createFlagRow(flag *kingpin.FlagModel) []string { name = fmt.Sprintf(`-%c, --%s`, flag.Short, flag.Name) } - return []string{name, flag.Help, defaultVal} + return []string{name, strings.ReplaceAll(flag.Help, "|", "\|"), defaultVal} } func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error { From 3648386345f76f7fc00b3d0dd68b2e0f8e9b4a42 Mon Sep 17 00:00:00 2001 From: B1F030 Date: Mon, 15 Jul 2024 11:59:58 +0800 Subject: [PATCH 29/81] fix fmt Signed-off-by: B1F030 --- util/documentcli/documentcli.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index b3f143cf6..9de2bb8d4 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -75,7 +75,7 @@ func createFlagRow(flag *kingpin.FlagModel) []string { name = fmt.Sprintf(`-%c, --%s`, flag.Short, flag.Name) } - return []string{name, strings.ReplaceAll(flag.Help, "|", "\|"), defaultVal} + return []string{name, strings.ReplaceAll(flag.Help, "|", `\|`), defaultVal} } func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error { From 2ca4d162360d9aae5f3ad56fef3fbb3b93ec4aa4 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 15 Jul 2024 14:09:02 +1000 Subject: [PATCH 30/81] storage: add `AtT` method to `MemoizedSeriesIterator` Signed-off-by: Charles Korn --- storage/memoized_iterator.go | 5 +++++ storage/memoized_iterator_test.go | 10 ++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/storage/memoized_iterator.go b/storage/memoized_iterator.go index 4ab2aa5d7..273b3caa1 100644 --- a/storage/memoized_iterator.go +++ b/storage/memoized_iterator.go @@ -136,6 +136,11 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist return b.it.AtFloatHistogram(nil) } +// AtT returns the timestamp of the current element of the iterator. +func (b *MemoizedSeriesIterator) AtT() int64 { + return b.it.AtT() +} + // Err returns the last encountered error. 
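For context on how the new accessor gets used: `MemoizedSeriesIterator` wraps an iterator whose `Next` reports the value type of the current sample, and a caller that only needs the current timestamp previously had to call `At` or `AtFloatHistogram` and throw the value away. A hedged usage sketch, not taken from this patch (`it` and the `maxt` cut-off are hypothetical):

```go
for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() {
	if it.AtT() > maxt { // read just the timestamp, without decoding the value
		break
	}
	switch valType {
	case chunkenc.ValFloat:
		t, v := it.At()
		_, _ = t, v // consume the float sample
	case chunkenc.ValFloatHistogram:
		t, fh := it.AtFloatHistogram()
		_, _ = t, fh // consume the histogram sample
	}
}
```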
func (b *MemoizedSeriesIterator) Err() error { return b.it.Err() diff --git a/storage/memoized_iterator_test.go b/storage/memoized_iterator_test.go index d1cd56517..81e517f96 100644 --- a/storage/memoized_iterator_test.go +++ b/storage/memoized_iterator_test.go @@ -29,13 +29,15 @@ func TestMemoizedSeriesIterator(t *testing.T) { sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) { if efh == nil { ts, v := it.At() - require.Equal(t, ets, ts, "timestamp mismatch") - require.Equal(t, ev, v, "value mismatch") + require.Equal(t, ets, ts, "At() timestamp mismatch") + require.Equal(t, ev, v, "At() value mismatch") } else { ts, fh := it.AtFloatHistogram() - require.Equal(t, ets, ts, "timestamp mismatch") - require.Equal(t, efh, fh, "histogram mismatch") + require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch") + require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch") } + + require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch") } prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) { ts, v, fh, ok := it.PeekPrev() From c0f1b452bcde0efc32b32141aad61b8de3332252 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 09:52:55 +0200 Subject: [PATCH 31/81] build(deps): bump @codemirror/view from 6.27.0 to 6.28.3 in /web/ui (#14379) Bumps [@codemirror/view](https://github.com/codemirror/view) from 6.27.0 to 6.28.3. - [Changelog](https://github.com/codemirror/view/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/view/compare/6.27.0...6.28.3) --- updated-dependencies: - dependency-name: "@codemirror/view" dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 10 +++++----- web/ui/react-app/package.json | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 556117ca8..05df82a51 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -37,7 +37,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.0", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.22.1", + "@codemirror/view": "^6.28.3", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index af4a8a5d3..6e9d8a969 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -41,7 +41,7 @@ "@codemirror/language": "^6.10.2", "@codemirror/lint": "^6.8.0", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.22.1", + "@codemirror/view": "^6.28.3", "@lezer/common": "^1.2.1", "@lezer/highlight": "^1.2.0", "@lezer/lr": "^1.4.1", @@ -2093,9 +2093,9 @@ "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" }, "node_modules/@codemirror/view": { - "version": "6.27.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.27.0.tgz", - "integrity": "sha512-8kqX1sHbVW1lVzWwrjAbh4dR7eKhV8eIQ952JKaBXOoXE04WncoqCy4DMU701LSrPZ3N2Q4zsTawz7GQ+2mrUw==", + "version": "6.28.3", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz", + "integrity": 
"sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==", "dependencies": { "@codemirror/state": "^6.4.0", "style-mod": "^4.1.0", @@ -19340,7 +19340,7 @@ "@codemirror/lint": "^6.8.0", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.22.1", + "@codemirror/view": "^6.28.3", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index e9bfb8655..27d0cee76 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -9,7 +9,7 @@ "@codemirror/lint": "^6.8.0", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", - "@codemirror/view": "^6.22.1", + "@codemirror/view": "^6.28.3", "@forevolve/bootstrap-dark": "^4.0.2", "@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2", From d84282b105e40a6f5f1e3263ae6a7d735155df76 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 09:47:16 +0100 Subject: [PATCH 32/81] Labels: use single byte as separator - small speedup Since `seps` is a variable, `seps[0]` has to be bounds-checked every time. Replacing with a constant everywhere it is used skips this overhead. Signed-off-by: Bryan Boreham --- model/labels/labels.go | 24 ++++++++++++------------ model/labels/labels_common.go | 5 +++-- model/labels/labels_dedupelabels.go | 24 ++++++++++++------------ model/labels/labels_stringlabels.go | 8 ++++---- model/labels/sharding.go | 4 ++-- model/labels/sharding_dedupelabels.go | 4 ++-- model/labels/sharding_stringlabels.go | 4 ++-- 7 files changed, 37 insertions(+), 36 deletions(-) diff --git a/model/labels/labels.go b/model/labels/labels.go index 01514abf3..cd30f4f8f 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte { b.WriteByte(labelSep) for i, l := range ls { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(l.Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(l.Value) } return b.Bytes() @@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } @@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ default: b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) i++ j++ } @@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { i++ default: if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) i++ j++ @@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { continue } if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) } return b.Bytes() diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go index 4bc94f84f..6db86b03c 100644 --- a/model/labels/labels_common.go +++ b/model/labels/labels_common.go @@ -29,10 +29,11 @@ const ( BucketLabel = "le" InstanceName = "instance" - labelSep = '\xfe' + labelSep = '\xfe' // Used at beginning of `Bytes` return. + sep = '\xff' // Used between labels in `Bytes` and `Hash`. ) -var seps = []byte{'\xff'} +var seps = []byte{sep} // Used with Hash, which has no WriteByte method. // Label is a key/value pair of strings. type Label struct { diff --git a/model/labels/labels_dedupelabels.go b/model/labels/labels_dedupelabels.go index 0e5bb048b..da8a88cc1 100644 --- a/model/labels/labels_dedupelabels.go +++ b/model/labels/labels_dedupelabels.go @@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) for i := 0; i < len(ls.data); { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } var name, value string name, i = decodeString(ls.syms, ls.data, i) value, i = decodeString(ls.syms, ls.data, i) b.WriteString(name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(value) } return b.Bytes() @@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) @@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { } if lName == names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos @@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { } if j == len(names) || lName != names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos diff --git a/model/labels/labels_stringlabels.go b/model/labels/labels_stringlabels.go index bccceb61f..c8bce5123 100644 --- a/model/labels/labels_stringlabels.go +++ b/model/labels/labels_stringlabels.go @@ -112,9 +112,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) 
- b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -138,9 +138,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } diff --git a/model/labels/sharding.go b/model/labels/sharding.go index 5e3e89fbb..8b3a36939 100644 --- a/model/labels/sharding.go +++ b/model/labels/sharding.go @@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } diff --git a/model/labels/sharding_dedupelabels.go b/model/labels/sharding_dedupelabels.go index 5912724f9..5bf41b05d 100644 --- a/model/labels/sharding_dedupelabels.go +++ b/model/labels/sharding_dedupelabels.go @@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) diff --git a/model/labels/sharding_stringlabels.go b/model/labels/sharding_stringlabels.go index 3ad2027d8..798f268eb 100644 --- a/model/labels/sharding_stringlabels.go +++ b/model/labels/sharding_stringlabels.go @@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } if h != nil { return h.Sum64() From c6c240d4ce4529e08aa48b21416b7e48a5da4351 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:42:41 +0200 Subject: [PATCH 33/81] build(deps): bump @codemirror/lint from 6.8.0 to 6.8.1 in /web/ui (#14383) Bumps [@codemirror/lint](https://github.com/codemirror/lint) from 6.8.0 to 6.8.1. - [Changelog](https://github.com/codemirror/lint/blob/main/CHANGELOG.md) - [Commits](https://github.com/codemirror/lint/compare/6.8.0...6.8.1) --- updated-dependencies: - dependency-name: "@codemirror/lint" dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- web/ui/module/codemirror-promql/package.json | 2 +- web/ui/package-lock.json | 10 +++++----- web/ui/react-app/package.json | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json index 05df82a51..ba924346f 100644 --- a/web/ui/module/codemirror-promql/package.json +++ b/web/ui/module/codemirror-promql/package.json @@ -35,7 +35,7 @@ "devDependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/language": "^6.10.2", - "@codemirror/lint": "^6.8.0", + "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.28.3", "@lezer/common": "^1.2.1", diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json index 6e9d8a969..62ac34e43 100644 --- a/web/ui/package-lock.json +++ b/web/ui/package-lock.json @@ -39,7 +39,7 @@ "devDependencies": { "@codemirror/autocomplete": "^6.17.0", "@codemirror/language": "^6.10.2", - "@codemirror/lint": "^6.8.0", + "@codemirror/lint": "^6.8.1", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.28.3", "@lezer/common": "^1.2.1", @@ -2068,9 +2068,9 @@ } }, "node_modules/@codemirror/lint": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.0.tgz", - "integrity": "sha512-lsFofvaw0lnPRJlQylNsC4IRt/1lI4OD/yYslrSGVndOJfStc58v+8p9dgGiD90ktOfL7OhBWns1ZETYgz0EJA==", + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz", + "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==", "dependencies": { "@codemirror/state": "^6.0.0", "@codemirror/view": "^6.0.0", @@ -19337,7 +19337,7 @@ "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", "@codemirror/language": "^6.10.2", - "@codemirror/lint": "^6.8.0", + "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.28.3", diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json index 27d0cee76..c8002433a 100644 --- a/web/ui/react-app/package.json +++ b/web/ui/react-app/package.json @@ -6,7 +6,7 @@ "@codemirror/autocomplete": "^6.17.0", "@codemirror/commands": "^6.6.0", "@codemirror/language": "^6.10.2", - "@codemirror/lint": "^6.8.0", + "@codemirror/lint": "^6.8.1", "@codemirror/search": "^6.5.6", "@codemirror/state": "^6.3.3", "@codemirror/view": "^6.28.3", From d878146c70f5b21d3f6d161d8586828099597ac4 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 15 Jul 2024 16:07:42 +0100 Subject: [PATCH 34/81] TSDB: shrink memSeries by moving bools together In each case the following member requires 8-byte alignment, so moving one beside the other shrinks memSeries from 176 to 168 bytes, when compiled with `-tags stringlabels`. Signed-off-by: Bryan Boreham --- tsdb/head.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tsdb/head.go b/tsdb/head.go index 30ad8139a..b7bfaa0fd 100644 --- a/tsdb/head.go +++ b/tsdb/head.go @@ -2099,6 +2099,7 @@ type memSeries struct { nextAt int64 // Timestamp at which to cut the next chunk. histogramChunkHasComputedEndTime bool // True if nextAt has been predicted for the current histograms chunk; false otherwise. + pendingCommit bool // Whether there are samples waiting to be committed to this series. 
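The saving here is pure alignment padding: on 64-bit platforms a lone bool that sits before an 8-byte-aligned field occupies a full 8-byte slot by itself, so packing the two bools into one slot recovers exactly the 8 bytes the commit message cites (176 to 168 with `-tags stringlabels`). A toy demonstration of the effect (struct and field names hypothetical):

```go
package main

import (
	"fmt"
	"unsafe"
)

// boolsApart mimics the old layout: each bool is followed by an
// 8-byte-aligned field, so each one drags in 7 bytes of padding.
type boolsApart struct {
	a  int64
	b1 bool // 7 bytes of padding follow
	c  int64
	b2 bool // 7 bytes of trailing padding
}

// boolsTogether mimics the new layout: the bools share one padded slot.
type boolsTogether struct {
	a      int64
	b1, b2 bool // 6 bytes of padding follow
	c      int64
}

func main() {
	fmt.Println(unsafe.Sizeof(boolsApart{}))    // 32 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(boolsTogether{})) // 24 on 64-bit platforms
}
```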
// We keep the last value here (in addition to appending it to the chunk) so we can check for duplicates. lastValue float64 @@ -2114,8 +2115,6 @@ type memSeries struct { // txs is nil if isolation is disabled. txs *txRing - - pendingCommit bool // Whether there are samples waiting to be committed to this series. } // memSeriesOOOFields contains the fields required by memSeries From f031fe1fa32079b6eb339c8e116e7669386b707a Mon Sep 17 00:00:00 2001 From: Sean Killeen Date: Mon, 15 Jul 2024 12:30:16 -0400 Subject: [PATCH 35/81] Update storage.md to provide right-sizing advice on retention Signed-off-by: Sean Killeen --- docs/storage.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/storage.md b/docs/storage.md index 947960fe1..55d4309d3 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -137,6 +137,18 @@ will be used. Expired block cleanup happens in the background. It may take up to two hours to remove expired blocks. Blocks must be fully expired before they are removed. +## Right-Sizing Retention Size + +If you are utilizing `storage.tsdb.retention.size` to set a size limit, you +will want to consider the right size for this value relative to the storage you +have allocated for Prometheus. It is wise to reduce the retention size to provide +a buffer, ensuring that older entries will be removed before the allocated storage +for Prometheus becomes full. + +At present, we recommend setting the retention size to, at most, 80-85% of your +allocated Prometheus disk space. This increases the likelihood that older entries +will be removed prior to hitting any disk limitations. + ## Remote storage integrations Prometheus's local storage is limited to a single node's scalability and durability. From a25b626792d36e82fe95b05cb3e41f896c4ad2ac Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 3 Jun 2024 18:02:26 +0200 Subject: [PATCH 36/81] prometheusremotewrite: Support resource attribute promotion Signed-off-by: Arve Knudsen --- config/config.go | 16 ++++++++ docs/configuration/configuration.md | 4 ++ .../prometheusremotewrite/helper.go | 38 +++++++++++++------ .../prometheusremotewrite/histograms.go | 2 +- .../prometheusremotewrite/metrics_to_prw.go | 13 ++++--- .../number_data_points.go | 4 +- storage/remote/write_handler.go | 17 ++++++--- storage/remote/write_test.go | 6 ++- web/api/v1/api.go | 2 +- web/api/v1/api_test.go | 1 + 10 files changed, 75 insertions(+), 28 deletions(-) diff --git a/config/config.go b/config/config.go index c924e3098..0880d518d 100644 --- a/config/config.go +++ b/config/config.go @@ -227,6 +227,9 @@ var ( DefaultExemplarsConfig = ExemplarsConfig{ MaxExemplars: 100000, } + + // DefaultOTLPConfig is the default OTLP configuration. + DefaultOTLPConfig = OTLPConfig{} ) // Config is the top-level configuration for Prometheus's config files. @@ -242,6 +245,7 @@ type Config struct { RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` + OTLPConfig OTLPConfig `yaml:"otlp,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -1304,3 +1308,15 @@ func getGoGCEnv() int { } return DefaultRuntimeConfig.GoGC } + +// OTLPConfig is the configuration for writing to the OTLP endpoint. +type OTLPConfig struct { + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface.
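The `UnmarshalYAML` implementation that follows uses the standard defaults-then-decode idiom, which is easy to misread; here are the same three steps restated with comments (this annotates the patch, it adds no behavior):

```go
func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = DefaultOTLPConfig // 1. start from the package defaults
	type plain OTLPConfig  // 2. alias with the same fields but no methods,
	//                           so the decode below cannot recurse into this method
	return unmarshal((*plain)(c)) // 3. decode the YAML over the defaulted struct
}
```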
+func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultOTLPConfig + type plain OTLPConfig + return unmarshal((*plain)(c)) +} diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 35976871b..5675210b7 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -152,6 +152,10 @@ alerting: remote_write: [ - ... ] +# Settings related to the OTLP receiver feature. +otlp: + [ promote_resource_attributes: [, ...] | default = [ ] ] + # Settings related to the remote read feature. remote_read: [ - ... ] diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 257133853..f2d7ecd4e 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -65,14 +65,14 @@ type bucketBoundsData struct { bound float64 } -// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds +// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds. type byBucketBoundsData []bucketBoundsData func (m byBucketBoundsData) Len() int { return len(m) } func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound } func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -// ByLabelName enables the usage of sort.Sort() with a slice of labels +// ByLabelName enables the usage of sort.Sort() with a slice of labels. type ByLabelName []prompb.Label func (a ByLabelName) Len() int { return len(a) } @@ -115,14 +115,23 @@ var seps = []byte{'\xff'} // createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values. // Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. -func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, +// If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. 
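One behavioral detail is worth stating before the function body: promoted resource attributes never win a collision. The metric's own attributes are inserted first, a promoted attribute is only added when its sanitized name is still unused, and the auto-generated `job`/`instance` labels are written afterwards regardless, so all of those take precedence over promotion. A reduced sketch of the merge step (lifted from the hunk below, with the surrounding code elided):

```go
for _, lbl := range promotedAttrs {
	normalized := prometheustranslator.NormalizeLabel(lbl.Name)
	if _, exists := l[normalized]; !exists { // existing labels always win
		l[normalized] = lbl.Value
	}
}
```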
+func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings, ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) + promotedAttrs := make([]prompb.Label, 0, len(settings.PromoteResourceAttributes)) + for _, name := range settings.PromoteResourceAttributes { + if value, exists := resourceAttrs.Get(name); exists { + promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()}) + } + } + sort.Stable(ByLabelName(promotedAttrs)) + // Calculate the maximum possible number of labels we could return so we can preallocate l - maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 + maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + len(extras)/2 if haveServiceName { maxLabelCount++ @@ -132,9 +141,6 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa maxLabelCount++ } - // map ensures no duplicate label name - l := make(map[string]string, maxLabelCount) - // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. labels := make([]prompb.Label, 0, maxLabelCount) @@ -148,6 +154,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa }) sort.Stable(ByLabelName(labels)) + // map ensures no duplicate label names. + l := make(map[string]string, maxLabelCount) for _, label := range labels { var finalKey = prometheustranslator.NormalizeLabel(label.Name) if existingValue, alreadyExists := l[finalKey]; alreadyExists { @@ -157,6 +165,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa } } + for _, lbl := range promotedAttrs { + normalized := prometheustranslator.NormalizeLabel(lbl.Name) + if _, exists := l[normalized]; !exists { + l[normalized] = lbl.Value + } + } + // Map service.name + service.namespace to job if haveServiceName { val := serviceName.AsString() @@ -169,7 +184,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa if haveInstanceID { l[model.InstanceLabel] = instance.AsString() } - for key, value := range externalLabels { + for key, value := range settings.ExternalLabels { // External labels have already been sanitized if _, alreadyExists := l[key]; alreadyExists { // Skip external labels if they are overridden by metric attributes @@ -232,7 +247,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra for x := 0; x < dataPoints.Len(); x++ { pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) + baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) // If the sum is unset, it indicates the _sum metric point should be // omitted @@ -408,7 +423,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat for x := 0; x < dataPoints.Len(); x++ { pt := dataPoints.At(x) timestamp := convertTimeStamp(pt.Timestamp()) - baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false) + baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false) // treat sum as a sample in an individual TimeSeries sum := &prompb.Sample{ @@ -554,7 +569,8 @@ func 
addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta name = settings.Namespace + "_" + name } - labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name) + settings.PromoteResourceAttributes = nil + labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name) haveIdentifier := false for _, l := range labels { if l.Name == model.JobLabel || l.Name == model.InstanceLabel { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 21b3f5dd9..73528019d 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -45,7 +45,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr lbls := createAttributes( resource, pt.Attributes(), - settings.ExternalLabels, + settings, nil, true, model.MetricNameLabel, diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 65dac99c5..a3a789723 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -30,12 +30,13 @@ import ( ) type Settings struct { - Namespace string - ExternalLabels map[string]string - DisableTargetInfo bool - ExportCreatedMetric bool - AddMetricSuffixes bool - SendMetadata bool + Namespace string + ExternalLabels map[string]string + DisableTargetInfo bool + ExportCreatedMetric bool + AddMetricSuffixes bool + SendMetadata bool + PromoteResourceAttributes []string } // PrometheusConverter converts from OTel write format to Prometheus remote write format. diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index aafebc6c4..80ccb46c7 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -34,7 +34,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(dataPoints pmetric.Number labels := createAttributes( resource, pt.Attributes(), - settings.ExternalLabels, + settings, nil, true, model.MetricNameLabel, @@ -64,7 +64,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(dataPoints pmetric.NumberDa lbls := createAttributes( resource, pt.Attributes(), - settings.ExternalLabels, + settings, nil, true, model.MetricNameLabel, diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index d82237371..b69504949 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -490,21 +490,23 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. 
-func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler { +func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler { rwHandler := &writeHandler{ logger: logger, appendable: appendable, } return &otlpWriteHandler{ - logger: logger, - rwHandler: rwHandler, + logger: logger, + rwHandler: rwHandler, + configFunc: configFunc, } } type otlpWriteHandler struct { - logger log.Logger - rwHandler *writeHandler + logger log.Logger + rwHandler *writeHandler + configFunc func() config.Config } func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -515,9 +517,12 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + otlpCfg := h.configFunc().OTLPConfig + converter := otlptranslator.NewPrometheusConverter() if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{ - AddMetricSuffixes: true, + AddMetricSuffixes: true, + PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, }); err != nil { level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) } diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go index 6e7422a58..83dfffbae 100644 --- a/storage/remote/write_test.go +++ b/storage/remote/write_test.go @@ -379,7 +379,11 @@ func TestOTLPWriteHandler(t *testing.T) { req.Header.Set("Content-Type", "application/x-protobuf") appendable := &mockAppendable{} - handler := NewOTLPWriteHandler(nil, appendable) + handler := NewOTLPWriteHandler(nil, appendable, func() config.Config { + return config.Config{ + OTLPConfig: config.DefaultOTLPConfig, + } + }) recorder := httptest.NewRecorder() handler.ServeHTTP(recorder, req) diff --git a/web/api/v1/api.go b/web/api/v1/api.go index 03854787f..d58be211f 100644 --- a/web/api/v1/api.go +++ b/web/api/v1/api.go @@ -295,7 +295,7 @@ func NewAPI( a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs) } if otlpEnabled { - a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap) + a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc) } return a diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index 86a57ca08..ba38ddc97 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -359,6 +359,7 @@ var samplePrometheusCfg = config.Config{ ScrapeConfigs: []*config.ScrapeConfig{}, RemoteWriteConfigs: []*config.RemoteWriteConfig{}, RemoteReadConfigs: []*config.RemoteReadConfig{}, + OTLPConfig: config.OTLPConfig{}, } var sampleFlagMap = map[string]string{ From c39776c5b5f5e0e7496f8aca3112063a655a50f2 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Fri, 12 Jul 2024 00:49:36 +0200 Subject: [PATCH 37/81] promql: Add NHCB tests This adds equivalent NHCB tests to the existing classic histogram tests. Signed-off-by: beorn7 --- promql/promqltest/testdata/histograms.test | 191 +++++++++++++++++++-- 1 file changed, 181 insertions(+), 10 deletions(-) diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index e1fb1d85a..349a1e79c 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -73,22 +73,32 @@ eval instant at 50m histogram_count(testhistogram3) {start="positive"} 110 {start="negative"} 20 +# Classic way of accessing the count still works. 
+eval instant at 50m testhistogram3_count + testhistogram3_count{start="positive"} 110 + testhistogram3_count{start="negative"} 20 + # Test histogram_sum. eval instant at 50m histogram_sum(testhistogram3) {start="positive"} 330 {start="negative"} 80 -# Test histogram_avg. +# Classic way of accessing the sum still works. +eval instant at 50m testhistogram3_sum + testhistogram3_sum{start="positive"} 330 + testhistogram3_sum{start="negative"} 80 + +# Test histogram_avg. This has no classic equivalent. eval instant at 50m histogram_avg(testhistogram3) {start="positive"} 3 {start="negative"} 4 -# Test histogram_stddev. +# Test histogram_stddev. This has no classic equivalent. eval instant at 50m histogram_stddev(testhistogram3) {start="positive"} 2.8189265757336734 {start="negative"} 4.182715937754936 -# Test histogram_stdvar. +# Test histogram_stdvar. This has no classic equivalent. eval instant at 50m histogram_stdvar(testhistogram3) {start="positive"} 7.946347039377573 {start="negative"} 17.495112615949154 @@ -103,137 +113,282 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) {start="positive"} 0.6363636363636364 {start="negative"} 0 -# Test histogram_quantile. +# In the classic histogram, we can access the corresponding bucket (if +# it exists) and divide by the count to get the same result. + +eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count + {start="positive"} 0.6363636363636364 + +eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m]) + {start="positive"} 0.6363636363636364 + +# Test histogram_quantile, native and classic. + +eval instant at 50m histogram_quantile(0, testhistogram3) + {start="positive"} 0 + {start="negative"} -0.25 eval instant at 50m histogram_quantile(0, testhistogram3_bucket) {start="positive"} 0 {start="negative"} -0.25 +eval instant at 50m histogram_quantile(0.25, testhistogram3) + {start="positive"} 0.055 + {start="negative"} -0.225 + eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket) {start="positive"} 0.055 {start="negative"} -0.225 +eval instant at 50m histogram_quantile(0.5, testhistogram3) + {start="positive"} 0.125 + {start="negative"} -0.2 + eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket) {start="positive"} 0.125 {start="negative"} -0.2 +eval instant at 50m histogram_quantile(0.75, testhistogram3) + {start="positive"} 0.45 + {start="negative"} -0.15 + eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket) {start="positive"} 0.45 {start="negative"} -0.15 +eval instant at 50m histogram_quantile(1, testhistogram3) + {start="positive"} 1 + {start="negative"} -0.1 + eval instant at 50m histogram_quantile(1, testhistogram3_bucket) {start="positive"} 1 {start="negative"} -0.1 # Quantile too low. + +eval_warn instant at 50m histogram_quantile(-0.1, testhistogram) + {start="positive"} -Inf + {start="negative"} -Inf + eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket) {start="positive"} -Inf {start="negative"} -Inf # Quantile too high. + +eval_warn instant at 50m histogram_quantile(1.01, testhistogram) + {start="positive"} +Inf + {start="negative"} +Inf + eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket) {start="positive"} +Inf {start="negative"} +Inf # Quantile invalid. 
+ +eval_warn instant at 50m histogram_quantile(NaN, testhistogram) + {start="positive"} NaN + {start="negative"} NaN + eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket) {start="positive"} NaN {start="negative"} NaN # Quantile value in lowest bucket. + +eval instant at 50m histogram_quantile(0, testhistogram) + {start="positive"} 0 + {start="negative"} -0.2 + eval instant at 50m histogram_quantile(0, testhistogram_bucket) {start="positive"} 0 {start="negative"} -0.2 # Quantile value in highest bucket. + +eval instant at 50m histogram_quantile(1, testhistogram) + {start="positive"} 1 + {start="negative"} 0.3 + eval instant at 50m histogram_quantile(1, testhistogram_bucket) {start="positive"} 1 {start="negative"} 0.3 # Finally some useful quantiles. + +eval instant at 50m histogram_quantile(0.2, testhistogram) + {start="positive"} 0.048 + {start="negative"} -0.2 + eval instant at 50m histogram_quantile(0.2, testhistogram_bucket) {start="positive"} 0.048 {start="negative"} -0.2 +eval instant at 50m histogram_quantile(0.5, testhistogram) + {start="positive"} 0.15 + {start="negative"} -0.15 + eval instant at 50m histogram_quantile(0.5, testhistogram_bucket) {start="positive"} 0.15 {start="negative"} -0.15 +eval instant at 50m histogram_quantile(0.8, testhistogram) + {start="positive"} 0.72 + {start="negative"} 0.3 + eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="positive"} 0.72 {start="negative"} 0.3 # More realistic with rates. + +eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m])) + {start="positive"} 0.048 + {start="negative"} -0.2 + eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) {start="positive"} 0.048 {start="negative"} -0.2 +eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m])) + {start="positive"} 0.15 + {start="negative"} -0.15 + eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) {start="positive"} 0.15 {start="negative"} -0.15 +eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m])) + {start="positive"} 0.72 + {start="negative"} 0.3 + eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) {start="positive"} 0.72 {start="negative"} 0.3 # Want results exactly in the middle of the bucket. + +eval instant at 7m histogram_quantile(1./6., testhistogram2) + {} 1 + eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket) {} 1 +eval instant at 7m histogram_quantile(0.5, testhistogram2) + {} 3 + eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket) {} 3 +eval instant at 7m histogram_quantile(5./6., testhistogram2) + {} 5 + eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket) {} 5 +eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m])) + {} 1 + eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m])) {} 1 +eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m])) + {} 3 + eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m])) {} 3 +eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m])) + {} 5 + eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) {} 5 -# Aggregated histogram: Everything in one. +# Aggregated histogram: Everything in one. Note how native histograms +# don't require aggregation by le. 
+ +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m]))) + {} 0.075 + eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m]))) + {} 0.1277777777777778 + eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. + +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m]))) + {} 0.075 + eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.075 +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m]))) + {} 0.12777777777777778 + eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. + +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance)) + {instance="ins1"} 0.075 + {instance="ins2"} 0.075 + eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance)) + {instance="ins1"} 0.1333333333 + {instance="ins2"} 0.125 + eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. + +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) + {job="job1"} 0.1 + {job="job2"} 0.0642857142857143 + eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job)) + {job="job1"} 0.14 + {job="job2"} 0.1125 + eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. + +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance)) + {instance="ins1", job="job1"} 0.11 + {instance="ins2", job="job1"} 0.09 + {instance="ins1", job="job2"} 0.06 + {instance="ins2", job="job2"} 0.0675 + eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance)) + {instance="ins1", job="job1"} 0.15 + {instance="ins2", job="job1"} 0.1333333333333333 + {instance="ins1", job="job2"} 0.1 + {instance="ins2", job="job2"} 0.1166666666666667 + eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 @@ -241,18 +396,32 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. 
+ +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m])) + {instance="ins1", job="job1"} 0.11 + {instance="ins2", job="job1"} 0.09 + {instance="ins1", job="job2"} 0.06 + {instance="ins2", job="job2"} 0.0675 + eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m])) + {instance="ins1", job="job1"} 0.15 + {instance="ins2", job="job1"} 0.13333333333333333 + {instance="ins1", job="job2"} 0.1 + {instance="ins2", job="job2"} 0.11666666666666667 + eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 +# All NHCBs summed into one. eval instant at 50m sum(request_duration_seconds) {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} @@ -303,11 +472,13 @@ load_with_nhcb 5m eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) {instance="ins1", job="job1"} NaN -# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set +# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set. # https://github.com/prometheus/prometheus/issues/9910 load_with_nhcb 5m - request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 - request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 - request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 + request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 + request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 + request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 -eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"}) +eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) + +eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) From ae82a0a9413eb0a27205428b3348ffe0743e5351 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 16 Jul 2024 14:32:24 +0200 Subject: [PATCH 38/81] Sanitize configured OTel resource attributes Signed-off-by: Arve Knudsen --- config/config.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index 0880d518d..fd2e6e06c 100644 --- a/config/config.go +++ b/config/config.go @@ -19,6 +19,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -1318,5 +1319,22 @@ type OTLPConfig struct { func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultOTLPConfig type plain OTLPConfig - return unmarshal((*plain)(c)) + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + seen := map[string]struct{}{} + i := 0 + for i < len(c.PromoteResourceAttributes) { + s := strings.TrimSpace(c.PromoteResourceAttributes[i]) + if _, exists := seen[s]; exists { + c.PromoteResourceAttributes = slices.Delete(c.PromoteResourceAttributes, i, i+1) + continue + } + + seen[s] = struct{}{} + c.PromoteResourceAttributes[i] = s + i++ + } + 
return nil } From ec818332dcc7a381add078d091bc3968c0d91d3f Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 16 Jul 2024 14:49:04 +0200 Subject: [PATCH 39/81] Add config tests Signed-off-by: Arve Knudsen --- config/config_test.go | 18 ++++++++++++++++++ config/testdata/conf.good.yml | 3 +++ .../otlp_sanitize_resource_attributes.good.yml | 2 ++ 3 files changed, 23 insertions(+) create mode 100644 config/testdata/otlp_sanitize_resource_attributes.good.yml diff --git a/config/config_test.go b/config/config_test.go index 3c4907a46..5822d2ceb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -156,6 +156,12 @@ var expectedConf = &Config{ }, }, + OTLPConfig: OTLPConfig{ + PromoteResourceAttributes: []string{ + "k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", + }, + }, + RemoteReadConfigs: []*RemoteReadConfig{ { URL: mustParseURL("http://remote1/read"), @@ -1471,6 +1477,18 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit) } +func TestOTLPSanitizeResourceAttributes(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes) +} + func TestLoadConfig(t *testing.T) { // Parse a valid file that sets a global scrape timeout. This tests whether parsing // an overwritten default field in the global config permanently changes the default. diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 0e0aa2bd5..56741822c 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -45,6 +45,9 @@ remote_write: headers: name: value +otlp: + promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"] + remote_read: - url: http://remote1/read read_recent: true diff --git a/config/testdata/otlp_sanitize_resource_attributes.good.yml b/config/testdata/otlp_sanitize_resource_attributes.good.yml new file mode 100644 index 000000000..ce91302fe --- /dev/null +++ b/config/testdata/otlp_sanitize_resource_attributes.good.yml @@ -0,0 +1,2 @@ +otlp: + promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", " k8s.job.name "] From 6a9df95620bf882d206be7c771ad65042400f6ca Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 16 Jul 2024 15:13:22 +0200 Subject: [PATCH 40/81] Add to changelog Signed-off-by: Arve Knudsen --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d5a91e900..8488af6dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## unreleased +* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. 
#14200 + ## 2.53.1 / 2024-07-10 Fix a bug which would drop samples in remote-write if the sending flow stalled From 1d21867d8b3d23863f78799cf60b6e3598ab414c Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 16 Jul 2024 15:13:40 +0200 Subject: [PATCH 41/81] Add otlptranslator tests Signed-off-by: Arve Knudsen --- .../prometheusremotewrite/helper_test.go | 161 ++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 storage/remote/otlptranslator/prometheusremotewrite/helper_test.go diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go new file mode 100644 index 000000000..c4dd781ae --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -0,0 +1,161 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package prometheusremotewrite + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/prometheus/prometheus/prompb" +) + +func TestCreateAttributes(t *testing.T) { + resourceAttrs := map[string]string{ + "service.name": "service name", + "service.instance.id": "service ID", + "existent-attr": "resource value", + // This one is for testing conflict with metric attribute. + "metric-attr": "resource value", + // This one is for testing conflict with auto-generated job attribute. + "job": "resource value", + // This one is for testing conflict with auto-generated instance attribute. 
+ "instance": "resource value", + } + + resource := pcommon.NewResource() + for k, v := range resourceAttrs { + resource.Attributes().PutStr(k, v) + } + attrs := pcommon.NewMap() + attrs.PutStr("__name__", "test_metric") + attrs.PutStr("metric-attr", "metric value") + + testCases := []struct { + name string + promoteResourceAttributes []string + expectedLabels []prompb.Label + }{ + { + name: "Successful conversion without resource attribute promotion", + promoteResourceAttributes: nil, + expectedLabels: []prompb.Label{ + { + Name: "__name__", + Value: "test_metric", + }, + { + Name: "instance", + Value: "service ID", + }, + { + Name: "job", + Value: "service name", + }, + { + Name: "metric_attr", + Value: "metric value", + }, + }, + }, + { + name: "Successful conversion with resource attribute promotion", + promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"}, + expectedLabels: []prompb.Label{ + { + Name: "__name__", + Value: "test_metric", + }, + { + Name: "instance", + Value: "service ID", + }, + { + Name: "job", + Value: "service name", + }, + { + Name: "metric_attr", + Value: "metric value", + }, + { + Name: "existent_attr", + Value: "resource value", + }, + }, + }, + { + name: "Successful conversion with resource attribute promotion, conflicting resource attributes are ignored", + promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"}, + expectedLabels: []prompb.Label{ + { + Name: "__name__", + Value: "test_metric", + }, + { + Name: "instance", + Value: "service ID", + }, + { + Name: "job", + Value: "service name", + }, + { + Name: "existent_attr", + Value: "resource value", + }, + { + Name: "metric_attr", + Value: "metric value", + }, + }, + }, + { + name: "Successful conversion with resource attribute promotion, attributes are only promoted once", + promoteResourceAttributes: []string{"existent-attr", "existent-attr"}, + expectedLabels: []prompb.Label{ + { + Name: "__name__", + Value: "test_metric", + }, + { + Name: "instance", + Value: "service ID", + }, + { + Name: "job", + Value: "service name", + }, + { + Name: "existent_attr", + Value: "resource value", + }, + { + Name: "metric_attr", + Value: "metric value", + }, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + settings := Settings{ + PromoteResourceAttributes: tc.promoteResourceAttributes, + } + lbls := createAttributes(resource, attrs, settings, nil, false) + + assert.ElementsMatch(t, lbls, tc.expectedLabels) + }) + } +} From 2e0e4e9ce963e18652d7ca0ba363aa5e6f11d1b4 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 8 Jul 2024 09:48:27 -0700 Subject: [PATCH 42/81] Add support for handling multiple chunks in OOO head MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Carrie Edwards Co-authored by: Jeanette Tan : Co-authored by: György Krajcsovits : Signed-off-by: Carrie Edwards Co-authored by: Fiona Liao : --- tsdb/head_append.go | 93 +++++++++++++++++++++--------------- tsdb/head_test.go | 4 +- tsdb/ooo_head.go | 106 ++++++++++++++++++++++++++++++++++-------- tsdb/ooo_head_read.go | 25 ++++++---- 4 files changed, 162 insertions(+), 66 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 62c3727e2..b92fa2e7f 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -846,16 +846,17 @@ func (a *headAppender) Commit() (err error) { // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) 
floatOOBRejected int - inOrderMint int64 = math.MaxInt64 - inOrderMaxt int64 = math.MinInt64 - ooomint int64 = math.MaxInt64 - ooomaxt int64 = math.MinInt64 - wblSamples []record.RefSample - oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef - oooRecords [][]byte - oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - series *memSeries - appendChunkOpts = chunkOpts{ + inOrderMint int64 = math.MaxInt64 + inOrderMaxt int64 = math.MinInt64 + ooomint int64 = math.MaxInt64 + ooomaxt int64 = math.MinInt64 + wblSamples []record.RefSample + oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef + oooMmapMarkersCount int + oooRecords [][]byte + oooCapMax = a.head.opts.OutOfOrderCapMax.Load() + series *memSeries + appendChunkOpts = chunkOpts{ chunkDiskMapper: a.head.chunkDiskMapper, chunkRange: a.head.chunkRange.Load(), samplesPerChunk: a.head.opts.SamplesPerChunk, @@ -872,6 +873,7 @@ func (a *headAppender) Commit() (err error) { // WBL is not enabled. So no need to collect. wblSamples = nil oooMmapMarkers = nil + oooMmapMarkersCount = 0 return } // The m-map happens before adding a new sample. So we collect @@ -880,12 +882,14 @@ func (a *headAppender) Commit() (err error) { // WBL Before this Commit(): [old samples before this commit for chunk 1] // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] if oooMmapMarkers != nil { - markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers)) - for ref, mmapRef := range oooMmapMarkers { - markers = append(markers, record.RefMmapMarker{ - Ref: ref, - MmapRef: mmapRef, - }) + markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount) + for ref, mmapRefs := range oooMmapMarkers { + for _, mmapRef := range mmapRefs { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) + } } r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) oooRecords = append(oooRecords, r) @@ -928,11 +932,11 @@ func (a *headAppender) Commit() (err error) { case oooSample: // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. - var mmapRef chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax) + var mmapRefs []chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax) if chunkCreated { r, ok := oooMmapMarkers[series.ref] - if !ok || r != 0 { + if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -943,9 +947,16 @@ func (a *headAppender) Commit() (err error) { } if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef) + oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + } + if len(mmapRefs) > 0 { + oooMmapMarkers[series.ref] = mmapRefs + oooMmapMarkersCount += len(mmapRefs) + } else { + // No chunk was written to disk, so we need to set an initial marker for this series. + oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + oooMmapMarkersCount++ } - oooMmapMarkers[series.ref] = mmapRef } if ok { wblSamples = append(wblSamples, s) @@ -1069,14 +1080,14 @@ func (a *headAppender) Commit() (err error) { } // insert is like append, except it inserts. Used for OOO samples. 
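+// When cutting the head chunk flushes data to disk, it also returns the refs of the newly m-mapped chunks.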
-func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } c := s.ooo.oooHeadChunk if c == nil || c.chunk.NumSamples() == int(oooCapMax) { // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks. - c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper) + c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper) chunkCreated = true } @@ -1089,7 +1100,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk c.maxTime = t } } - return ok, chunkCreated, mmapRef + return ok, chunkCreated, mmapRefs } // chunkOpts are chunk-level options that are passed when appending to a memSeries. @@ -1431,7 +1442,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. // The caller must ensure that s.ooo is not nil. -func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) { +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper) s.ooo.oooHeadChunk = &oooHeadChunk{ @@ -1443,21 +1454,29 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk return s.ooo.oooHeadChunk, ref } -func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) chunks.ChunkDiskMapperRef { +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { - // There is no head chunk, so nothing to m-map here. - return 0 + // OOO is not enabled or there is no head chunk, so nothing to m-map here. + return nil + } + chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64) + if err != nil { + handleChunkWriteError(err) + return nil + } + chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1) + for _, memchunk := range chks { + chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError) + chunkRefs = append(chunkRefs, chunkRef) + s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{ + ref: chunkRef, + numSamples: uint16(memchunk.chunk.NumSamples()), + minTime: memchunk.minTime, + maxTime: memchunk.maxTime, + }) } - xor, _ := s.ooo.oooHeadChunk.chunk.ToXOR() // Encode to XorChunk which is more compact and implements all of the needed functionality. - chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, xor, true, handleChunkWriteError) - s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{ - ref: chunkRef, - numSamples: uint16(xor.NumSamples()), - minTime: s.ooo.oooHeadChunk.minTime, - maxTime: s.ooo.oooHeadChunk.maxTime, - }) s.ooo.oooHeadChunk = nil - return chunkRef + return chunkRefs } // mmapChunks will m-map all but first chunk on s.headChunks list. 
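Context for the change above: a chunk holds samples of a single encoding, so when an OOO head chunk contains both floats and histograms, ToEncodedChunks splits it at each encoding change and mmapCurrentOOOHeadChunk writes one disk chunk per piece, hence the slice of refs instead of a single one. Below is a minimal, self-contained sketch of that constraint using only the public chunkenc API (illustrative only, not code from this patch):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Float samples accumulate in a single XOR chunk.
	xor := chunkenc.NewXORChunk()
	app, err := xor.Appender()
	if err != nil {
		panic(err)
	}
	for t := int64(0); t < 3; t++ {
		app.Append(t, float64(t))
	}

	// A histogram sample cannot go into an XOR chunk; it needs a
	// histogram chunk of its own, so a series with mixed encodings
	// necessarily produces more than one chunk (and more than one
	// m-map ref once each chunk is written to disk).
	hc := chunkenc.NewHistogramChunk()
	happ, err := hc.Appender()
	if err != nil {
		panic(err)
	}
	h := &histogram.Histogram{Count: 1, Sum: 1, ZeroCount: 1, ZeroThreshold: 0.001}
	if _, _, _, err := happ.AppendHistogram(nil, 3, h, false); err != nil {
		panic(err)
	}

	fmt.Println(xor.NumSamples(), hc.NumSamples()) // 3 1
}

Running the sketch prints "3 1": three float samples in the XOR chunk and the lone histogram in its own chunk.
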
diff --git a/tsdb/head_test.go b/tsdb/head_test.go index fa4834516..2456d6a1b 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -4792,9 +4792,11 @@ func TestWBLReplay(t *testing.T) { require.False(t, ok) require.NotNil(t, ms) - xor, err := ms.ooo.oooHeadChunk.chunk.ToXOR() + chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64) require.NoError(t, err) + require.Len(t, chks, 1) + xor := chks[0].chunk.(*chunkenc.XORChunk) it := xor.Iterator(nil) actOOOSamples := make([]sample, 0, len(expOOOSamples)) for it.Next() == chunkenc.ValFloat { diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index 7f2110fa6..d90163a18 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -15,11 +15,11 @@ package tsdb import ( "fmt" + "github.com/prometheus/prometheus/tsdb/chunkenc" "sort" "github.com/oklog/ulid" - "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -74,24 +74,22 @@ func (o *OOOChunk) NumSamples() int { return len(o.samples) } -func (o *OOOChunk) ToXOR() (*chunkenc.XORChunk, error) { - x := chunkenc.NewXORChunk() - app, err := x.Appender() - if err != nil { - return nil, err - } - for _, s := range o.samples { - app.Append(s.t, s.f) - } - return x, nil -} - -func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, error) { - x := chunkenc.NewXORChunk() - app, err := x.Appender() - if err != nil { - return nil, err +// ToEncodedChunks returns chunks with the samples in the OOOChunk. +// +//nolint:revive // unexported-return. +func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) { + if len(o.samples) == 0 { + return nil, nil } + // The most common case is that there will be a single chunk, with the same type of samples in it - this is always true for float samples. + chks = make([]memChunk, 0, 1) + var ( + cmint int64 + cmaxt int64 + chunk chunkenc.Chunk + app chunkenc.Appender + ) + prevEncoding := chunkenc.EncNone // Yes we could call the chunk for this, but this is more efficient. for _, s := range o.samples { if s.t < mint { continue @@ -99,9 +97,77 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk, if s.t > maxt { break } - app.Append(s.t, s.f) + encoding := chunkenc.EncXOR + if s.h != nil { + encoding = chunkenc.EncHistogram + } else if s.fh != nil { + encoding = chunkenc.EncFloatHistogram + } + + // prevApp is the appender for the previous sample. + prevApp := app + + if encoding != prevEncoding { // For the first sample, this will always be true as EncNone != EncXOR | EncHistogram | EncFloatHistogram + if prevEncoding != chunkenc.EncNone { + chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + } + cmint = s.t + switch encoding { + case chunkenc.EncXOR: + chunk = chunkenc.NewXORChunk() + case chunkenc.EncHistogram: + chunk = chunkenc.NewHistogramChunk() + case chunkenc.EncFloatHistogram: + chunk = chunkenc.NewFloatHistogramChunk() + default: + chunk = chunkenc.NewXORChunk() + } + app, err = chunk.Appender() + if err != nil { + return + } + } + switch encoding { + case chunkenc.EncXOR: + app.Append(s.t, s.f) + case chunkenc.EncHistogram: + // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway. + prevHApp, _ := prevApp.(*chunkenc.HistogramAppender) + var ( + newChunk chunkenc.Chunk + recoded bool + ) + newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false) + if newChunk != nil { // A new chunk was allocated. 
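+				// recoded means the existing samples were copied into newChunk with a
+				// wider histogram layout, so the old chunk is superseded rather than kept.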
+ if !recoded { + chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + } + chunk = newChunk + cmint = s.t + } + case chunkenc.EncFloatHistogram: + // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway. + prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender) + var ( + newChunk chunkenc.Chunk + recoded bool + ) + newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false) + if newChunk != nil { // A new chunk was allocated. + if !recoded { + chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + } + chunk = newChunk + cmint = s.t + } + } + cmaxt = s.t + prevEncoding = encoding } - return x, nil + if prevEncoding != chunkenc.EncNone { + chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + } + return chks, nil } var _ BlockReader = &OOORangeHead{} diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 47972c3cc..aedda49dd 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -108,11 +108,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra c := s.ooo.oooHeadChunk if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks)))) - var xor chunkenc.Chunk if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least. - xor, _ = c.chunk.ToXOR() // Ignoring error because it can't fail. + chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime) + if err != nil { + handleChunkWriteError(err) + return nil + } + for _, chk := range chks { + addChunk(c.minTime, c.maxTime, ref, chk.chunk) + } + } else { + var enc chunkenc.Chunk + addChunk(c.minTime, c.maxTime, ref, enc) } - addChunk(c.minTime, c.maxTime, ref, xor) } } for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- { @@ -341,14 +349,15 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, continue } - mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) - if mmapRef == 0 && len(ms.ooo.oooMmappedChunks) > 0 { + mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) + if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 { // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists. 
- mmapRef = ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref + mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref} } - seq, off := mmapRef.Unpack() + lastMmapRef := mmapRefs[len(mmapRefs)-1] + seq, off := lastMmapRef.Unpack() if seq > lastSeq || (seq == lastSeq && off > lastOff) { - ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off + ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off } if len(ms.ooo.oooMmappedChunks) > 0 { ch.postings = append(ch.postings, seriesRef) From fb2ebe1c3d615a7e2926b8fadf515853a59fdc7c Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 8 Jul 2024 10:34:31 -0700 Subject: [PATCH 43/81] Fix linting Signed-off-by: Carrie Edwards --- tsdb/ooo_head.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index d90163a18..b2556d62e 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -15,9 +15,10 @@ package tsdb import ( "fmt" - "github.com/prometheus/prometheus/tsdb/chunkenc" "sort" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/oklog/ulid" "github.com/prometheus/prometheus/tsdb/chunks" From e0d7cef545df335b47212784e836c161fea51de4 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Tue, 9 Jul 2024 13:10:14 -0700 Subject: [PATCH 44/81] Rename some variables in Commit() Signed-off-by: Carrie Edwards --- tsdb/head_append.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index b92fa2e7f..4a594f314 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -848,8 +848,8 @@ func (a *headAppender) Commit() (err error) { inOrderMint int64 = math.MaxInt64 inOrderMaxt int64 = math.MinInt64 - ooomint int64 = math.MaxInt64 - ooomaxt int64 = math.MinInt64 + oooMinT int64 = math.MaxInt64 + oooMaxT int64 = math.MinInt64 wblSamples []record.RefSample oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef oooMmapMarkersCount int @@ -960,11 +960,11 @@ func (a *headAppender) Commit() (err error) { } if ok { wblSamples = append(wblSamples, s) - if s.T < ooomint { - ooomint = s.T + if s.T < oooMinT { + oooMinT = s.T } - if s.T > ooomaxt { - ooomaxt = s.T + if s.T > oooMaxT { + oooMaxT = s.T } floatOOOAccepted++ } else { @@ -1064,7 +1064,7 @@ func (a *headAppender) Commit() (err error) { a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted)) a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) - a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt) + a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) collectOOORecords() if a.head.wbl != nil { From 7e020bb4e9e3bde3ce117a187809c8aea3d4fed4 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 15 Jul 2024 13:15:07 -0700 Subject: [PATCH 45/81] Refactor Signed-off-by: Carrie Edwards --- tsdb/head_append.go | 2 +- tsdb/ooo_head_read.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 4a594f314..c6898c10c 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -940,7 +940,7 @@ func (a *headAppender) Commit() (err error) { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. - // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). 
+ // r != nil means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. collectOOORecords() diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index aedda49dd..14d3724e8 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -118,8 +118,8 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra addChunk(c.minTime, c.maxTime, ref, chk.chunk) } } else { - var enc chunkenc.Chunk - addChunk(c.minTime, c.maxTime, ref, enc) + var emptyChunk chunkenc.Chunk + addChunk(c.minTime, c.maxTime, ref, emptyChunk) } } } From 79b53bd3de89df9ea2a87078c8180b4d201593c2 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Mon, 15 Jul 2024 13:15:41 -0700 Subject: [PATCH 46/81] Refactor TestWBLReplay to use scenarios Signed-off-by: Carrie Edwards Co-authored by: Fiona Liao : --- tsdb/head_test.go | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 2456d6a1b..c192c8a07 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -4730,6 +4730,14 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) { // TestWBLReplay checks the replay at a low level. func TestWBLReplay(t *testing.T) { + for name, scenario := range sampleTypeScenarios { + t.Run(name, func(t *testing.T) { + testWBLReplay(t, scenario) + }) + } +} + +func testWBLReplay(t *testing.T, scenario sampleTypeScenario) { dir := t.TempDir() wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy) require.NoError(t, err) @@ -4745,11 +4753,11 @@ func TestWBLReplay(t *testing.T) { require.NoError(t, err) require.NoError(t, h.Init(0)) - var expOOOSamples []sample + var expOOOSamples []chunks.Sample l := labels.FromStrings("foo", "bar") - appendSample := func(mins int64, isOOO bool) { + appendSample := func(mins int64, val float64, isOOO bool) { app := h.Appender(context.Background()) - ts, v := mins*time.Minute.Milliseconds(), float64(mins) + ts, v := mins*time.Minute.Milliseconds(), val _, err := app.Append(0, l, ts, v) require.NoError(t, err) require.NoError(t, app.Commit()) @@ -4760,15 +4768,15 @@ func TestWBLReplay(t *testing.T) { } // In-order sample. - appendSample(60, false) + appendSample(60, 60, false) // Out of order samples. - appendSample(40, true) - appendSample(35, true) - appendSample(50, true) - appendSample(55, true) - appendSample(59, true) - appendSample(31, true) + appendSample(40, 40, true) + appendSample(35, 35, true) + appendSample(50, 50, true) + appendSample(55, 55, true) + appendSample(59, 59, true) + appendSample(31, 31, true) // Check that Head's time ranges are set properly. require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime()) @@ -4796,20 +4804,19 @@ func TestWBLReplay(t *testing.T) { require.NoError(t, err) require.Len(t, chks, 1) - xor := chks[0].chunk.(*chunkenc.XORChunk) - it := xor.Iterator(nil) - actOOOSamples := make([]sample, 0, len(expOOOSamples)) - for it.Next() == chunkenc.ValFloat { - ts, v := it.At() - actOOOSamples = append(actOOOSamples, sample{t: ts, f: v}) - } + it := chks[0].chunk.Iterator(nil) + actOOOSamples, err := storage.ExpandSamples(it, nil) + require.NoError(t, err) // OOO chunk will be sorted. Hence sort the expected samples. 
sort.Slice(expOOOSamples, func(i, j int) bool { - return expOOOSamples[i].t < expOOOSamples[j].t + return expOOOSamples[i].T() < expOOOSamples[j].T() }) - require.Equal(t, expOOOSamples, actOOOSamples) + // Passing in true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers + // from being factored in to the sample comparison + // TODO(fionaliao): understand counter reset behaviour, might want to modify this later + requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true) require.NoError(t, h.Close()) } From 02f05cbf2c5d27eefa60fe9d8e47408b745fd052 Mon Sep 17 00:00:00 2001 From: Carrie Edwards Date: Tue, 16 Jul 2024 09:01:08 -0700 Subject: [PATCH 47/81] Ensure lastMmapRef set to 0 if mmapRefs is empty Signed-off-by: Carrie Edwards --- tsdb/ooo_head_read.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index 14d3724e8..2eebd8115 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -349,12 +349,17 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, continue } + var lastMmapRef chunks.ChunkDiskMapperRef mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 { // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists. mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref} } - lastMmapRef := mmapRefs[len(mmapRefs)-1] + if len(mmapRefs) == 0 { + lastMmapRef = 0 + } else { + lastMmapRef = mmapRefs[len(mmapRefs)-1] + } seq, off := lastMmapRef.Unpack() if seq > lastSeq || (seq == lastSeq && off > lastOff) { ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off From 6b4ff79ce04254ae452b5a89bfde5d5dbb2431fe Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 17 Jul 2024 16:30:36 +0100 Subject: [PATCH 48/81] Update Go dependencies Prior to preparing v2.54 release. Note not updating to k8s.io 1.30, which would force Go 1.22. 
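(Illustrative only; the exact commands are not recorded in this commit. Per-module
bumps of the form

    go get github.com/prometheus/common@v0.55.0
    go mod tidy

allow holding back individual dependencies, whereas a blanket "go get -u ./..."
would also have pulled in the k8s.io 1.30 update avoided above.)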
Signed-off-by: Bryan Boreham --- documentation/examples/remote_storage/go.mod | 2 +- go.mod | 94 ++++----- go.sum | 189 ++++++++++--------- 3 files changed, 143 insertions(+), 142 deletions(-) diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 348b27dfc..4c41a6606 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -10,7 +10,7 @@ require ( github.com/influxdata/influxdb v1.11.5 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/common v0.55.0 - github.com/prometheus/prometheus v0.52.1 + github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) diff --git a/go.mod b/go.mod index 4107f3a09..44c0aca7a 100644 --- a/go.mod +++ b/go.mod @@ -5,20 +5,20 @@ go 1.21.0 toolchain go1.22.5 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 github.com/Code-Hex/go-generics-cache v1.5.1 github.com/KimMachineGun/automemlimit v0.6.1 github.com/alecthomas/kingpin/v2 v2.4.0 - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 - github.com/aws/aws-sdk-go v1.53.16 + github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 + github.com/aws/aws-sdk-go v1.54.19 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 - github.com/digitalocean/godo v1.117.0 - github.com/docker/docker v26.1.3+incompatible + github.com/digitalocean/godo v1.118.0 + github.com/docker/docker v27.0.3+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.12.0 github.com/envoyproxy/protoc-gen-validate v1.0.4 @@ -31,60 +31,60 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.6.0 - github.com/google/pprof v0.0.0-20240528025155-186aa0362fba + github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da github.com/google/uuid v1.6.0 - github.com/gophercloud/gophercloud v1.12.0 + github.com/gophercloud/gophercloud v1.13.0 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.29.1 - github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d - github.com/hetznercloud/hcloud-go/v2 v2.9.0 + github.com/hashicorp/consul/api v1.29.2 + github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 + github.com/hetznercloud/hcloud-go/v2 v2.10.2 github.com/ionos-cloud/sdk-go/v6 v6.1.11 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.8 + github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b - github.com/linode/linodego v1.35.0 - github.com/miekg/dns v1.1.59 + github.com/linode/linodego v1.37.0 + github.com/miekg/dns v1.1.61 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 - github.com/ovh/go-ovh v1.5.1 + github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.19.1 
github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.54.0 + github.com/prometheus/common v0.55.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.11.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/stretchr/testify v1.9.0 github.com/vultr/govultr/v2 v2.17.2 - go.opentelemetry.io/collector/pdata v1.11.0 - go.opentelemetry.io/collector/semconv v0.104.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 - go.opentelemetry.io/otel v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 - go.opentelemetry.io/otel/sdk v1.27.0 - go.opentelemetry.io/otel/trace v1.27.0 + go.opentelemetry.io/collector/pdata v1.12.0 + go.opentelemetry.io/collector/semconv v0.105.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 + go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 + go.opentelemetry.io/otel/sdk v1.28.0 + go.opentelemetry.io/otel/trace v1.28.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/net v0.26.0 + golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 + golang.org/x/sys v0.22.0 golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 - golang.org/x/tools v0.22.0 - google.golang.org/api v0.183.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 - google.golang.org/grpc v1.64.0 + golang.org/x/tools v0.23.0 + google.golang.org/api v0.188.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d + google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -92,14 +92,14 @@ require ( k8s.io/apimachinery v0.29.3 k8s.io/client-go v0.29.3 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.120.1 + k8s.io/klog/v2 v2.130.1 ) require ( - cloud.google.com/go/auth v0.5.1 // indirect + cloud.google.com/go/auth v0.7.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + cloud.google.com/go/compute/metadata v0.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect @@ -107,7 +107,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cilium/ebpf v0.11.0 // indirect - github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -121,7 +121,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghodss/yaml v1.0.0 // indirect 
github.com/go-kit/kit v0.12.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.22.2 // indirect github.com/go-openapi/errors v0.22.0 // indirect @@ -134,7 +134,7 @@ require ( github.com/go-resty/resty/v2 v2.13.1 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.0 // indirect + github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect @@ -142,7 +142,7 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/gax-go/v2 v2.12.5 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect @@ -178,20 +178,20 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - golang.org/x/crypto v0.24.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/crypto v0.25.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/term v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/term v0.22.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index dc4a7ecfd..bb515753d 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= +cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= +cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= 
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c= +cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -36,12 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= @@ -75,8 +75,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= -github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= +github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= @@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= @@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw= 
-github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= +github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4= +github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -210,8 +210,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= @@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g= -github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= +github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= -github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= -github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0= +github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -353,10 +353,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= -github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= -github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= -github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= +github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk= +github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= +github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= 
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= @@ -409,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc= -github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY= -github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg= +github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I= +github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -454,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod 
h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do= -github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0= +github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso= +github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -500,8 +500,8 @@ github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwU github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= -github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -573,8 +573,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI= -github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= +github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= 
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -639,8 +639,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -650,8 +650,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= @@ -694,6 +694,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -723,28 +724,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod 
h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= +go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= +go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= +go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -773,8 +774,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -809,8 +810,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -856,8 +857,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net 
v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -946,16 +947,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1025,8 +1026,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1046,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1084,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= +google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1106,8 +1107,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 8f9069f41b20915ee9db856db3881c48b4dcf4cf Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 17 Jul 2024 17:00:50 +0100 Subject: [PATCH 49/81] Bump Otel semconv version to 1.26.0 Signed-off-by: Bryan Boreham --- tracing/tracing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tracing/tracing.go b/tracing/tracing.go index 3b1c61532..6b9319ecb 100644 --- a/tracing/tracing.go +++ b/tracing/tracing.go @@ -30,7 +30,7 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" "google.golang.org/grpc/credentials" From ce3bd4abea1f395d7f1fcfe01ded0daeaac79fdd Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Wed, 17 Jul 2024 17:03:32 +0100 Subject: [PATCH 50/81] Update for Docker deprecation Signed-off-by: Bryan Boreham --- discovery/moby/network.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/discovery/moby/network.go b/discovery/moby/network.go index 794d2e607..ea1ca66bc 100644 --- a/discovery/moby/network.go +++ b/discovery/moby/network.go @@ -17,7 +17,7 @@ import ( "context" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/prometheus/prometheus/util/strutil" @@ -34,7 +34,7 @@ const ( ) func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) { - networks, err := client.NetworkList(ctx, types.NetworkListOptions{}) + networks, err := client.NetworkList(ctx, network.ListOptions{}) if err != nil { return nil, err } From 932918cd3fe003d0f4fd0fe153b610a068fe9fea Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Thu, 18 Jul 2024 10:40:47 +0200 Subject: [PATCH 51/81] OTLPConfig.UnmarshalYAML: Return error on invalid input Signed-off-by: Arve Knudsen --- config/config.go | 22 ++++++++++--------- config/config_test.go | 22 +++++++++++++------ .../otlp_sanitize_resource_attributes.bad.yml | 2 ++ ...otlp_sanitize_resource_attributes.good.yml | 2 +- 4 files changed, 30 insertions(+), 18 deletions(-) create mode 100644 config/testdata/otlp_sanitize_resource_attributes.bad.yml diff --git a/config/config.go b/config/config.go index fd2e6e06c..913983881 100644 --- a/config/config.go +++ b/config/config.go @@ -19,7 +19,6 @@ import ( "net/url" "os" "path/filepath" - "slices" "sort" "strconv" "strings" @@ -1324,17 +1323,20 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } seen := map[string]struct{}{} - i := 0 - for i < len(c.PromoteResourceAttributes) { - s := strings.TrimSpace(c.PromoteResourceAttributes[i]) - if _, exists := seen[s]; exists { - c.PromoteResourceAttributes = slices.Delete(c.PromoteResourceAttributes, i, i+1) + var err error + for i, attr := range c.PromoteResourceAttributes { + attr = strings.TrimSpace(attr) + if attr == "" { + err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute")) + continue + } + if _, exists := seen[attr]; exists { + err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr)) continue } - seen[s] = struct{}{} - c.PromoteResourceAttributes[i] = s - i++ + seen[attr] = struct{}{} + c.PromoteResourceAttributes[i] = attr } - return nil + return 
err } diff --git a/config/config_test.go b/config/config_test.go index 5822d2ceb..b684fdb50 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1478,15 +1478,23 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) { } func TestOTLPSanitizeResourceAttributes(t *testing.T) { - want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) - require.NoError(t, err) + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) + require.NoError(t, err) - out, err := yaml.Marshal(want) - require.NoError(t, err) - var got Config - require.NoError(t, yaml.UnmarshalStrict(out, &got)) + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) - require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes) + require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes) + }) + + t.Run("bad config", func(t *testing.T) { + _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger()) + require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) + require.ErrorContains(t, err, `empty promoted OTel resource attribute`) + }) } func TestLoadConfig(t *testing.T) { diff --git a/config/testdata/otlp_sanitize_resource_attributes.bad.yml b/config/testdata/otlp_sanitize_resource_attributes.bad.yml new file mode 100644 index 000000000..37ec5d120 --- /dev/null +++ b/config/testdata/otlp_sanitize_resource_attributes.bad.yml @@ -0,0 +1,2 @@ +otlp: + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""] diff --git a/config/testdata/otlp_sanitize_resource_attributes.good.yml b/config/testdata/otlp_sanitize_resource_attributes.good.yml index ce91302fe..67247e774 100644 --- a/config/testdata/otlp_sanitize_resource_attributes.good.yml +++ b/config/testdata/otlp_sanitize_resource_attributes.good.yml @@ -1,2 +1,2 @@ otlp: - promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name", " k8s.job.name "] + promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"] From a6fa8a471c58b4c3dc2027bcd03bdf42cd9bdb49 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 18 Jul 2024 12:30:55 +0200 Subject: [PATCH 52/81] docs: Correct and clarify histogram bucket and resolution limits Previously, the config doc suggested that scrapes are simply failed if the bucket limit is exceeded. However, instead Prometheus first attempts to reduce the resolution. This will succeed in almost all non-pathological cases. However, in the rare case that it doesn't, the scrape will be failed after all. For the resolution limit AKA `native_histogram_min_bucket_factor`, the logic is a bit different. Once the highest factor, i.e. schema -4, is reached, the scrape will still succeed. 
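For illustration, a scrape_config could combine both limits roughly like this (a hedged sketch; the job name and the concrete values are hypothetical, not part of this change):

  scrape_configs:
    - job_name: "native-histograms-example"   # hypothetical job
      # Resolution is reduced until at most 160 buckets remain; the scrape only
      # fails if even the lowest resolution (schema -4) still exceeds the limit.
      native_histogram_bucket_limit: 160
      # Buckets are merged until each bucket grows by at least factor 1.1 over
      # the previous one; once schema -4 is reached, the scrape still succeeds.
      native_histogram_min_bucket_factor: 1.1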
Signed-off-by: beorn7 --- docs/configuration/configuration.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 35976871b..ff24082e4 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -458,13 +458,15 @@ metric_relabel_configs: [ keep_dropped_targets: | default = 0 ] # Limit on total number of positive and negative buckets allowed in a single -# native histogram. If this is exceeded, the entire scrape will be treated as -# failed. 0 means no limit. +# native histogram. The resolution of a histogram with more buckets will be +# reduced until the number of buckets is within the limit. If the limit cannot +# be reached, the scrape will fail. +# 0 means no limit. [ native_histogram_bucket_limit: | default = 0 ] # Lower limit for the growth factor of one bucket to the next in each native # histogram. The resolution of a histogram with a lower growth factor will be -# reduced until it is within the limit. +# reduced as much as possible until it is within the limit. # To set an upper limit for the schema (equivalent to "scale" in OTel's # exponential histograms), use the following factor limits: # From a60e5ce362f53802b20230add513e586e5775f91 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Fri, 19 Jul 2024 19:53:40 +0200 Subject: [PATCH 53/81] [PRW 2.0] Added Sender and RW Handler support for Response Stats. (#14444) * [PRW 2.0] Added Sender support for Response Stats. Chained on top of https://github.com/prometheus/prometheus/pull/14427 Fixes https://github.com/prometheus/prometheus/issues/14359 Signed-off-by: bwplotka * Addressed comments. Signed-off-by: bwplotka * move write stats to its own file Signed-off-by: Callum Styan * Clean up header usage Signed-off-by: Callum Styan * add missing license to new stats file Signed-off-by: Callum Styan * Addressed all comments. Signed-off-by: bwplotka --------- Signed-off-by: bwplotka Signed-off-by: Callum Styan Co-authored-by: Callum Styan --- CHANGELOG.md | 3 + cmd/promtool/metrics.go | 3 +- storage/remote/client.go | 31 ++++--- storage/remote/client_test.go | 8 +- storage/remote/queue_manager.go | 125 ++++++++++++++++++++------- storage/remote/queue_manager_test.go | 50 +++++------ storage/remote/stats.go | 107 +++++++++++++++++++++++ storage/remote/write_handler.go | 60 +++++-------- storage/remote/write_handler_test.go | 16 ++-- 9 files changed, 276 insertions(+), 127 deletions(-) create mode 100644 storage/remote/stats.go diff --git a/CHANGELOG.md b/CHANGELOG.md index d5a91e900..e7314d041 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ ## unreleased +* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444 +* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will now have correct values for `prometheus_storage__failed_total` in case of partial errors #14444 + ## 2.53.1 / 2024-07-10 Fix a bug which would drop samples in remote-write if the sending flow stalled diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 46246b672..6d162f459 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -101,6 +101,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin return successExitCode } +// TODO(bwplotka): Add PRW 2.0 support. 
func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool { metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels) if err != nil { @@ -116,7 +117,7 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s // Encode the request body into snappy encoding. compressed := snappy.Encode(nil, raw) - err = client.Store(context.Background(), compressed, 0) + _, err = client.Store(context.Background(), compressed, 0) if err != nil { fmt.Fprintln(os.Stderr, " FAILED:", err) return false diff --git a/storage/remote/client.go b/storage/remote/client.go index eff44c606..17caf7be9 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -14,7 +14,6 @@ package remote import ( - "bufio" "bytes" "context" "fmt" @@ -235,12 +234,12 @@ type RecoverableError struct { // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled // and encoded bytes from codec.go. -func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { +func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) { httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req)) if err != nil { // Errors from NewRequest are from unparsable URLs, so are not // recoverable. - return err + return WriteResponseStats{}, err } httpReq.Header.Add("Content-Encoding", string(c.writeCompression)) @@ -267,28 +266,34 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error { if err != nil { // Errors from Client.Do are from (for example) network errors, so are // recoverable. - return RecoverableError{err, defaultBackoff} + return WriteResponseStats{}, RecoverableError{err, defaultBackoff} } defer func() { io.Copy(io.Discard, httpResp.Body) httpResp.Body.Close() }() + // TODO(bwplotka): Pass logger and emit debug on error? + // Parsing error means there were some response header values we can't parse, + // we can continue handling. + rs, _ := ParseWriteResponseStats(httpResp) + //nolint:usestdlibvars - if httpResp.StatusCode/100 != 2 { - scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) - line := "" - if scanner.Scan() { - line = scanner.Text() - } - err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) + if httpResp.StatusCode/100 == 2 { + return rs, nil } + + // Handling errors e.g. read potential error in the body. + // TODO(bwplotka): Pass logger and emit debug on error? + body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen)) + err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body) + //nolint:usestdlibvars if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { - return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} + return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} } - return err + return rs, err } // retryAfterDuration returns the duration for the Retry-After header. 
In case of any errors, it diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index 2acb8e279..9184ce100 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -73,7 +73,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) { c, err := NewWriteClient(hash, conf) require.NoError(t, err) - err = c.Store(context.Background(), []byte{}, 0) + _, err = c.Store(context.Background(), []byte{}, 0) if test.err != nil { require.EqualError(t, err, test.err.Error()) } else { @@ -133,7 +133,7 @@ func TestClientRetryAfter(t *testing.T) { c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit)) var recErr RecoverableError - err = c.Store(context.Background(), []byte{}, 0) + _, err = c.Store(context.Background(), []byte{}, 0) require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.") if tc.expectedRecoverable { require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter) @@ -169,7 +169,7 @@ func TestRetryAfterDuration(t *testing.T) { } } -func TestClientHeaders(t *testing.T) { +func TestClientCustomHeaders(t *testing.T) { headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"} var called bool @@ -203,7 +203,7 @@ func TestClientHeaders(t *testing.T) { c, err := NewWriteClient("c", conf) require.NoError(t, err) - err = c.Store(context.Background(), []byte{}, 0) + _, err = c.Store(context.Background(), []byte{}, 0) require.NoError(t, err) require.True(t, called, "The remote server wasn't called") diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 5bafb9da2..5b59288e6 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -391,7 +391,7 @@ func (m *queueManagerMetrics) unregister() { // external timeseries database. type WriteClient interface { // Store stores the given samples in the remote storage. - Store(ctx context.Context, req []byte, retryAttempt int) error + Store(ctx context.Context, req []byte, retryAttempt int) (WriteResponseStats, error) // Name uniquely identifies the remote storage. Name() string // Endpoint is the remote read or write endpoint for the storage client. @@ -597,14 +597,15 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p } begin := time.Now() - err := t.storeClient.Store(ctx, req, try) + // Ignoring WriteResponseStats, because there is nothing for metadata, since it's + // embedded in v2 calls now, and we do v1 here. 
+ _, err := t.storeClient.Store(ctx, req, try) t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) if err != nil { span.RecordError(err) return err } - return nil } @@ -1661,8 +1662,8 @@ func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sen func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { begin := time.Now() - err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc) - s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin)) + rs, err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc) + s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, rs, time.Since(begin)) return err } @@ -1670,17 +1671,29 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s // See https://github.com/prometheus/prometheus/issues/14409 func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { begin := time.Now() - err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc) - s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin)) + rs, err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc) + s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, rs, time.Since(begin)) return err } -func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) { +func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, rs WriteResponseStats, duration time.Duration) { + // Partial errors may happen -- account for that. 
+ sampleDiff := sampleCount - rs.Samples + if sampleDiff > 0 { + s.qm.metrics.failedSamplesTotal.Add(float64(sampleDiff)) + } + histogramDiff := histogramCount - rs.Histograms + if histogramDiff > 0 { + s.qm.metrics.failedHistogramsTotal.Add(float64(histogramDiff)) + } + exemplarDiff := exemplarCount - rs.Exemplars + if exemplarDiff > 0 { + s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff)) + } if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err) - s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) - s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) - s.qm.metrics.failedHistogramsTotal.Add(float64(histogramCount)) + level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) + } else if sampleDiff+exemplarDiff+histogramDiff > 0 { + level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver, yet statistics indicate some data was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) } // These counters are used to calculate the dynamic sharding, and as such @@ -1688,6 +1701,7 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount)) s.qm.dataOutDuration.incr(int64(duration)) s.qm.lastSendTimestamp.Store(time.Now().Unix()) + // Pending samples/exemplars/histograms also should be subtracted, as an error means // they will not be retried. s.qm.metrics.pendingSamples.Sub(float64(sampleCount)) @@ -1699,19 +1713,29 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl } // sendSamples to the remote storage with backoff for recoverable errors. -func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error { +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. - return err + return WriteResponseStats{}, err } reqSize := len(req) *buf = req + // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need + // to track the total amount of accepted data across the various attempts. + accumulatedStats := WriteResponseStats{} + var accumulatedStatsMu sync.Mutex + addStats := func(rs WriteResponseStats) { + accumulatedStatsMu.Lock() + accumulatedStats = accumulatedStats.Add(rs) + accumulatedStatsMu.Unlock() + } + // An anonymous function allows us to defer the completion of our per-try spans // without causing a memory leak, and it has the nice effect of not propagating any // parameters for sendSamplesWithBackoff/3. 
@@ -1759,15 +1783,19 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - err := s.qm.client().Store(ctx, *buf, try) + // Technically for v1, we will likely have empty response stats, but for + // newer Receivers this might not be the case, so we use it on a best-effort basis. + rs, err := s.qm.client().Store(ctx, *buf, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors; + // so far we don't have those, so it's ok to potentially skew statistics. + addStats(rs) - if err != nil { - span.RecordError(err) - return err + if err == nil { + return nil } - - return nil + span.RecordError(err) + return err } onRetry := func() { @@ -1780,29 +1808,48 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti if errors.Is(err, context.Canceled) { // When there is resharding, we cancel the context for this queue, which means the data is not sent. // So we exit early to not update the metrics. - return err + return accumulatedStats, err } s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) - return err + if err == nil && !accumulatedStats.Confirmed { + // No 2.0 response headers, and we sent v1 message, so likely it's 1.0 Receiver. + // Assume success, don't rely on headers. + return WriteResponseStats{ + Samples: sampleCount, + Histograms: histogramCount, + Exemplars: exemplarCount, + }, nil + } + return accumulatedStats, err } // sendV2Samples to the remote storage with backoff for recoverable errors. -func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error { +func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) (WriteResponseStats, error) { // Build the WriteRequest with no metadata. req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc) s.qm.buildRequestLimitTimestamp.Store(lowest) if err != nil { // Failing to build the write request is non-recoverable, since it will // only error if marshaling the proto to bytes fails. - return err + return WriteResponseStats{}, err } reqSize := len(req) *buf = req + // Since we retry writes via attemptStore and sendWriteRequestWithBackoff we need + // to track the total amount of accepted data across the various attempts. + accumulatedStats := WriteResponseStats{} + var accumulatedStatsMu sync.Mutex + addStats := func(rs WriteResponseStats) { + accumulatedStatsMu.Lock() + accumulatedStats = accumulatedStats.Add(rs) + accumulatedStatsMu.Unlock() + } + // An anonymous function allows us to defer the completion of our per-try spans // without causing a memory leak, and it has the nice effect of not propagating any // parameters for sendSamplesWithBackoff/3. 
@@ -1850,15 +1897,28 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) s.qm.metrics.histogramsTotal.Add(float64(histogramCount)) s.qm.metrics.metadataTotal.Add(float64(metadataCount)) - err := s.qm.client().Store(ctx, *buf, try) + rs, err := s.qm.client().Store(ctx, *buf, try) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + // TODO(bwplotka): Revisit this once we have Receivers doing retriable partial errors; + // so far we don't have those, so it's ok to potentially skew statistics. + addStats(rs) - if err != nil { - span.RecordError(err) - return err + if err == nil { + // Check the case mentioned in PRW 2.0 + // https://prometheus.io/docs/specs/remote_write_spec_2_0/#required-written-response-headers. + if sampleCount+histogramCount+exemplarCount > 0 && rs.NoDataWritten() { + err = fmt.Errorf("sent v2 request with %v samples, %v histograms and %v exemplars; got 2xx, but PRW 2.0 response header statistics indicate %v samples, %v histograms and %v exemplars were accepted;"+ " assuming failure e.g. the target only supports PRW 1.0 prometheus.WriteRequest, but does not check the Content-Type header correctly", + sampleCount, histogramCount, exemplarCount, + rs.Samples, rs.Histograms, rs.Exemplars, + ) + span.RecordError(err) + return err + } + return nil } - - return nil + span.RecordError(err) + return err } onRetry := func() { @@ -1871,13 +1931,12 @@ func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2 if errors.Is(err, context.Canceled) { // When there is resharding, we cancel the context for this queue, which means the data is not sent. // So we exit early to not update the metrics. - return err + return accumulatedStats, err } s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) - - return err + return accumulatedStats, err } func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) { diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 5227c2d6a..7343184fc 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -118,10 +118,10 @@ func TestBasicContentNegotiation(t *testing.T) { expectFail: true, }, { - name: "v2 talks to v1 that tries to unmarshal v2 payload with v1 proto", + name: "v2 talks to (broken) v1 that tries to unmarshal v2 payload with v1 proto", senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV1, injectErrs: []error{nil}, - expectFail: true, // invalid request, no timeseries + expectFail: true, // We detect this thanks to https://github.com/prometheus/prometheus/issues/14359 }, // Opposite, v1 talking to v2 only server. { senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV2, injectErrs: []error{errors.New("pretend unrecoverable err")}, expectFail: true, }, - { - name: "v1 talks to (broken) v2 that tries to unmarshal v1 payload with v2 proto", - senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV2, - injectErrs: []error{nil}, - expectFail: true, // invalid request, no timeseries - }, } { t.Run(tc.name, func(t *testing.T) { dir := t.TempDir() @@ -182,7 +176,6 @@ func TestBasicContentNegotiation(t *testing.T) { if !tc.expectFail { // No error expected, so wait for data. 
c.waitForExpectedData(t, 5*time.Second) - require.Equal(t, 1, c.writesReceived) require.Equal(t, 0.0, client_testutil.ToFloat64(qm.metrics.failedSamplesTotal)) } else { // Wait for failure to be recorded in metrics. @@ -190,11 +183,10 @@ func TestBasicContentNegotiation(t *testing.T) { defer cancel() require.NoError(t, runutil.Retry(500*time.Millisecond, ctx.Done(), func() error { if client_testutil.ToFloat64(qm.metrics.failedSamplesTotal) != 1.0 { - return errors.New("expected one sample failed in qm metrics") + return fmt.Errorf("expected one sample failed in qm metrics; got %v", client_testutil.ToFloat64(qm.metrics.failedSamplesTotal)) } return nil })) - require.Equal(t, 0, c.writesReceived) } // samplesTotal means attempts. @@ -764,10 +756,10 @@ func TestDisableReshardOnRetry(t *testing.T) { metrics = newQueueManagerMetrics(nil, "", "") client = &MockWriteClient{ - StoreFunc: func(ctx context.Context, b []byte, i int) error { + StoreFunc: func(ctx context.Context, b []byte, i int) (WriteResponseStats, error) { onStoreCalled() - return RecoverableError{ + return WriteResponseStats{}, RecoverableError{ error: fmt.Errorf("fake error"), retryAfter: model.Duration(retryAfter), } @@ -1113,14 +1105,14 @@ func (c *TestWriteClient) SetReturnError(err error) { c.returnError = err } -func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { +func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) (WriteResponseStats, error) { c.mtx.Lock() defer c.mtx.Unlock() if c.storeWait > 0 { time.Sleep(c.storeWait) } if c.returnError != nil { - return c.returnError + return WriteResponseStats{}, c.returnError } // nil buffers are ok for snappy, ignore cast error. if c.buf != nil { @@ -1130,14 +1122,14 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { reqBuf, err := snappy.Decode(c.buf, req) c.buf = reqBuf if err != nil { - return err + return WriteResponseStats{}, err } // Check if we've been told to inject err for this call. if len(c.injectedErrs) > 0 { c.currErr++ if err = c.injectedErrs[c.currErr]; err != nil { - return err + return WriteResponseStats{}, err } } @@ -1156,13 +1148,10 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { } } if err != nil { - return err - } - - if len(reqProto.Timeseries) == 0 && len(reqProto.Metadata) == 0 { - return errors.New("invalid request, no timeseries") + return WriteResponseStats{}, err } + rs := WriteResponseStats{} b := labels.NewScratchBuilder(0) for _, ts := range reqProto.Timeseries { labels := ts.ToLabels(&b, nil) @@ -1170,10 +1159,12 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { if len(ts.Samples) > 0 { c.receivedSamples[tsID] = append(c.receivedSamples[tsID], ts.Samples...) } + rs.Samples += len(ts.Samples) if len(ts.Exemplars) > 0 { c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ts.Exemplars...) 
} + rs.Exemplars += len(ts.Exemplars) for _, h := range ts.Histograms { if h.IsFloatHistogram() { @@ -1182,13 +1173,14 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error { c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], h) } } + rs.Histograms += len(ts.Histograms) } for _, m := range reqProto.Metadata { c.receivedMetadata[m.MetricFamilyName] = append(c.receivedMetadata[m.MetricFamilyName], m) } c.writesReceived++ - return nil + return rs, nil } func (c *TestWriteClient) Name() string { @@ -1256,10 +1248,10 @@ func NewTestBlockedWriteClient() *TestBlockingWriteClient { return &TestBlockingWriteClient{} } -func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) error { +func (c *TestBlockingWriteClient) Store(ctx context.Context, _ []byte, _ int) (WriteResponseStats, error) { c.numCalls.Inc() <-ctx.Done() - return nil + return WriteResponseStats{}, nil } func (c *TestBlockingWriteClient) NumCalls() uint64 { @@ -1278,19 +1270,19 @@ func (c *TestBlockingWriteClient) Endpoint() string { type NopWriteClient struct{} func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} } -func (c *NopWriteClient) Store(context.Context, []byte, int) error { - return nil +func (c *NopWriteClient) Store(context.Context, []byte, int) (WriteResponseStats, error) { + return WriteResponseStats{}, nil } func (c *NopWriteClient) Name() string { return "nopwriteclient" } func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" } type MockWriteClient struct { - StoreFunc func(context.Context, []byte, int) error + StoreFunc func(context.Context, []byte, int) (WriteResponseStats, error) NameFunc func() string EndpointFunc func() string } -func (c *MockWriteClient) Store(ctx context.Context, bb []byte, n int) error { +func (c *MockWriteClient) Store(ctx context.Context, bb []byte, n int) (WriteResponseStats, error) { return c.StoreFunc(ctx, bb, n) } func (c *MockWriteClient) Name() string { return c.NameFunc() } diff --git a/storage/remote/stats.go b/storage/remote/stats.go new file mode 100644 index 000000000..89d00ffc3 --- /dev/null +++ b/storage/remote/stats.go @@ -0,0 +1,107 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "errors" + "net/http" + "strconv" +) + +const ( + rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" + rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" + rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" +) + +// WriteResponseStats represents the response write statistics specified in https://github.com/prometheus/docs/pull/2486 +type WriteResponseStats struct { + // Samples represents X-Prometheus-Remote-Write-Samples-Written + Samples int + // Histograms represents X-Prometheus-Remote-Write-Histograms-Written + Histograms int + // Exemplars represents X-Prometheus-Remote-Write-Exemplars-Written + Exemplars int + + // Confirmed means we can trust those statistics from the point of view + // of the PRW 2.0 spec. When parsed from headers, it means we got at least one + // response header from the Receiver to confirm those numbers, meaning it must + // be at least a 2.0 Receiver. See ParseWriteResponseStats for details. + Confirmed bool +} + +// NoDataWritten returns true if statistics indicate no data was written. +func (s WriteResponseStats) NoDataWritten() bool { + return (s.Samples + s.Histograms + s.Exemplars) == 0 +} + +// AllSamples returns both float and histogram sample numbers. +func (s WriteResponseStats) AllSamples() int { + return s.Samples + s.Histograms +} + +// Add returns the sum of this WriteResponseStats plus the given WriteResponseStats. +func (s WriteResponseStats) Add(rs WriteResponseStats) WriteResponseStats { + s.Confirmed = rs.Confirmed + s.Samples += rs.Samples + s.Histograms += rs.Histograms + s.Exemplars += rs.Exemplars + return s +} + +// SetHeaders sets response headers in a given response writer. +// Make sure to use it before http.ResponseWriter.WriteHeader and .Write. +func (s WriteResponseStats) SetHeaders(w http.ResponseWriter) { + h := w.Header() + h.Set(rw20WrittenSamplesHeader, strconv.Itoa(s.Samples)) + h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.Histograms)) + h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.Exemplars)) +} + +// ParseWriteResponseStats returns WriteResponseStats parsed from the response headers. +// +// As per 2.0 spec, missing header means 0. However, abrupt HTTP errors, 1.0 Receivers +// or buggy 2.0 Receivers might result in no response headers specified and that +// might NOT necessarily mean nothing was written. To represent that, we set +// s.Confirmed = true only when we see at least one response header. +// +// An error is returned when any of the headers fails to parse as an integer. +func ParseWriteResponseStats(r *http.Response) (s WriteResponseStats, err error) { + var ( + errs []error + h = r.Header + ) + if v := h.Get(rw20WrittenSamplesHeader); v != "" { // Empty means zero. + s.Confirmed = true + if s.Samples, err = strconv.Atoi(v); err != nil { + s.Samples = 0 + errs = append(errs, err) + } + } + if v := h.Get(rw20WrittenHistogramsHeader); v != "" { // Empty means zero. + s.Confirmed = true + if s.Histograms, err = strconv.Atoi(v); err != nil { + s.Histograms = 0 + errs = append(errs, err) + } + } + if v := h.Get(rw20WrittenExemplarsHeader); v != "" { // Empty means zero. + s.Confirmed = true + if s.Exemplars, err = strconv.Atoi(v); err != nil { + s.Exemplars = 0 + errs = append(errs, err) + } + } + return s, errors.Join(errs...) 
+} diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index d82237371..6756bf0ab 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -19,7 +19,6 @@ import ( "fmt" "io" "net/http" - "strconv" "strings" "time" @@ -201,7 +200,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { respStats, errHTTPCode, err := h.writeV2(r.Context(), &req) // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases. - respStats.SetResponseHeaders(w.Header()) + respStats.SetHeaders(w) if err != nil { if errHTTPCode/5 == 100 { // 5xx @@ -318,24 +317,6 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist return nil } -const ( - prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples" - rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms" - rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars" -) - -type responseStats struct { - samples int - histograms int - exemplars int -} - -func (s responseStats) SetResponseHeaders(h http.Header) { - h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples)) - h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms)) - h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars)) -} - // writeV2 is similar to write, but it works with v2 proto message, // allows partial 4xx writes and gathers statistics. // @@ -345,14 +326,14 @@ func (s responseStats) SetResponseHeaders(h http.Header) { // // NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors. // Once we have 5xx type of error, we immediately stop and rollback all appends. -func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) { +func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ WriteResponseStats, errHTTPCode int, _ error) { app := &timeLimitAppender{ Appender: h.appendable.Appender(ctx), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), } - rs := responseStats{} - samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs) + s := WriteResponseStats{} + samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &s) if err != nil { if errHTTPCode/5 == 100 { // On 5xx, we always rollback, because we expect @@ -360,29 +341,29 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ res if rerr := app.Rollback(); rerr != nil { level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) } - return responseStats{}, errHTTPCode, err + return WriteResponseStats{}, errHTTPCode, err } // Non-retriable (e.g. bad request error case). Can be partially written. commitErr := app.Commit() if commitErr != nil { // Bad requests does not matter as we have internal error (retryable). - return responseStats{}, http.StatusInternalServerError, commitErr + return WriteResponseStats{}, http.StatusInternalServerError, commitErr } // Bad request error happened, but rest of data (if any) was written. h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) - return rs, errHTTPCode, err + return s, errHTTPCode, err } // All good just commit. 
if err := app.Commit(); err != nil { - return responseStats{}, http.StatusInternalServerError, err + return WriteResponseStats{}, http.StatusInternalServerError, err } h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata)) - return rs, 0, nil + return s, 0, nil } -func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) { +func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *WriteResponseStats) (samplesWithoutMetadata, errHTTPCode int, err error) { var ( badRequestErrs []error outOfOrderExemplarErrs, samplesWithInvalidLabels int @@ -400,14 +381,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * continue } - allSamplesSoFar := rs.samples + rs.histograms + allSamplesSoFar := rs.AllSamples() var ref storage.SeriesRef // Samples. for _, s := range ts.Samples { ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { - rs.samples++ + rs.Samples++ continue } // Handle append error. @@ -431,7 +412,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil) } if err == nil { - rs.histograms++ + rs.Histograms++ continue } // Handle append error. @@ -453,18 +434,19 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * e := ep.ToExemplar(&b, req.Symbols) ref, err = app.AppendExemplar(ref, ls, e) if err == nil { - rs.exemplars++ + rs.Exemplars++ continue } // Handle append error. - // TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms. - // Since exemplar storage is still experimental, we don't fail in anyway, the request on ingestion errors. if errors.Is(err, storage.ErrOutOfOrderExemplar) { - outOfOrderExemplarErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. + level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. + // For now we keep the previously released flow (error instead of debug log level) of dropping them without rollback and 5xx. + level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } m := ts.ToMetadata(req.Symbols) @@ -472,7 +454,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead. 
-		samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar
+		samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar
 	}
 }
 
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index 9b5fb1a6e..af2229b9a 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -398,7 +398,7 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
 		{
 			desc:              "Partial write; skipped exemplar; exemplar storage errs are noop",
 			input:             writeV2RequestFixture.Timeseries,
-			appendExemplarErr: errors.New("some exemplar append error"),
+			appendExemplarErr: errors.New("some exemplar internal append error"),
 			expectedCode:      http.StatusNoContent,
 		},
 
@@ -449,9 +449,9 @@
 			if tc.expectedCode == http.StatusInternalServerError {
 				// We don't expect writes for partial writes with retry-able code.
-				expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
-				expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
-				expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
+				expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenSamplesHeader))
+				expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
+				expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
 
 				require.Empty(t, len(appendable.samples))
 				require.Empty(t, len(appendable.histograms))
@@ -462,12 +462,12 @@
 
 			// Double check mandatory 2.0 stats.
 			// writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each.
-			expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
-			expectHeaderValue(t, 4, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
+			expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenSamplesHeader))
+			expectHeaderValue(t, 4, resp.Header.Get(rw20WrittenHistogramsHeader))
 			if tc.appendExemplarErr != nil {
-				expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
+				expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
 			} else {
-				expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
+				expectHeaderValue(t, 2, resp.Header.Get(rw20WrittenExemplarsHeader))
 			}
 
 			// Double check what was actually appended.

From 465891cc5686f207a27e7cd1f9718062a5a83aba Mon Sep 17 00:00:00 2001
From: gotjosh
Date: Mon, 22 Jul 2024 14:11:18 +0100
Subject: [PATCH 54/81] Rules: Refactor concurrency controller interface
 (#14491)

* Rules: Refactor concurrency controller interface

The main purpose of this refactor is to modify the concurrency
controller interface to accept a Context, but I also made two drive-by
modifications that I think are sensible:

1. I have moved the check for dependencies on rules to the controller
itself - this aligns with how the controller should behave, as it is a
deciding factor on whether we should run concurrently or not.

2. I cleaned up some unused methods from the days of the old interface
before #13527 changed it.
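As a sketch of what the new interface enables (hypothetical type name; only
the semaphore and rule-dependency calls come from this patch), a custom
controller can now honour context cancellation while waiting for a slot:

```go
package rules

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// ctxAwareController is a hypothetical RuleConcurrencyController: it only
// admits rules that are independent in both directions, and, unlike
// TryAcquire, it respects ctx cancellation while waiting for a slot.
type ctxAwareController struct {
	sema *semaphore.Weighted
}

func (c *ctxAwareController) Allow(ctx context.Context, _ *Group, rule Rule) bool {
	if !rule.NoDependentRules() || !rule.NoDependencyRules() {
		return false
	}
	// Acquire blocks until a slot frees up or ctx is cancelled.
	return c.sema.Acquire(ctx, 1) == nil
}

func (c *ctxAwareController) Done(_ context.Context) {
	c.sema.Release(1)
}
```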
Signed-off-by: gotjosh

---------

Signed-off-by: gotjosh
---
 rules/group.go   | 10 ++-------
 rules/manager.go | 56 ++++++++++++++++--------------------------------
 2 files changed, 20 insertions(+), 46 deletions(-)

diff --git a/rules/group.go b/rules/group.go
index 0bc219a11..201d3a67d 100644
--- a/rules/group.go
+++ b/rules/group.go
@@ -621,14 +621,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
 				}
 			}
 
-			// If the rule has no dependencies, it can run concurrently because no other rules in this group depend on its output.
-			// Try run concurrently if there are slots available.
-			if ctrl := g.concurrencyController; isRuleEligibleForConcurrentExecution(rule) && ctrl.Allow() {
+			if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
 				wg.Add(1)
 
 				go eval(i, rule, func() {
 					wg.Done()
-					ctrl.Done()
+					ctrl.Done(ctx)
 				})
 			} else {
 				eval(i, rule, nil)
@@ -1094,7 +1092,3 @@ func buildDependencyMap(rules []Rule) dependencyMap {
 
 	return dependencies
 }
-
-func isRuleEligibleForConcurrentExecution(rule Rule) bool {
-	return rule.NoDependentRules() && rule.NoDependencyRules()
-}
diff --git a/rules/manager.go b/rules/manager.go
index ab33c3c7d..9e5b33fbc 100644
--- a/rules/manager.go
+++ b/rules/manager.go
@@ -457,67 +457,47 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
 // Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
 // server with additional query load. Concurrency is controlled globally, not on a per-group basis.
 type RuleConcurrencyController interface {
-	// Allow determines whether any concurrent evaluation slots are available.
-	// If Allow() returns true, then Done() must be called to release the acquired slot.
-	Allow() bool
+	// Allow determines if the given rule is allowed to be evaluated concurrently.
+	// If Allow() returns true, then Done() must be called to release the acquired slot and perform any corresponding cleanup.
+	// It is important that neither the *Group nor the Rule is retained; they must only be used for the duration of the call.
+	Allow(ctx context.Context, group *Group, rule Rule) bool
 
 	// Done releases a concurrent evaluation slot.
-	Done()
+	Done(ctx context.Context)
 }
 
// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
 type concurrentRuleEvalController struct {
-	sema      *semaphore.Weighted
-	depMapsMu sync.Mutex
-	depMaps   map[*Group]dependencyMap
+	sema *semaphore.Weighted
 }
 
 func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
 	return &concurrentRuleEvalController{
-		sema:    semaphore.NewWeighted(maxConcurrency),
-		depMaps: map[*Group]dependencyMap{},
+		sema: semaphore.NewWeighted(maxConcurrency),
 	}
 }
 
-func (c *concurrentRuleEvalController) RuleEligible(g *Group, r Rule) bool {
-	c.depMapsMu.Lock()
-	defer c.depMapsMu.Unlock()
-
-	depMap, found := c.depMaps[g]
-	if !found {
-		depMap = buildDependencyMap(g.rules)
-		c.depMaps[g] = depMap
+func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
+	// To allow a rule to be executed concurrently, we need 3 conditions:
+	// 1. The rule must not have any rules that depend on it.
+	// 2. The rule itself must not depend on any other rules.
+	// 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
+ if rule.NoDependentRules() && rule.NoDependencyRules() { + return c.sema.TryAcquire(1) } - return depMap.isIndependent(r) + return false } -func (c *concurrentRuleEvalController) Allow() bool { - return c.sema.TryAcquire(1) -} - -func (c *concurrentRuleEvalController) Done() { +func (c *concurrentRuleEvalController) Done(_ context.Context) { c.sema.Release(1) } -func (c *concurrentRuleEvalController) Invalidate() { - c.depMapsMu.Lock() - defer c.depMapsMu.Unlock() - - // Clear out the memoized dependency maps because some or all groups may have been updated. - c.depMaps = map[*Group]dependencyMap{} -} - // sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially. type sequentialRuleEvalController struct{} -func (c sequentialRuleEvalController) RuleEligible(_ *Group, _ Rule) bool { +func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool { return false } -func (c sequentialRuleEvalController) Allow() bool { - return false -} - -func (c sequentialRuleEvalController) Done() {} -func (c sequentialRuleEvalController) Invalidate() {} +func (c sequentialRuleEvalController) Done(_ context.Context) {} From 23307b02c58adc56695844b9a851578fb6c26677 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 22 Jul 2024 14:33:59 +0100 Subject: [PATCH 55/81] [TESTS] Storage: Improve MergeQuerier tests `TestMergeQuerierWithSecondaries_ErrorHandling` now tests `NewMergeQuerier` rather than creating the data structure directly. This means we now test short-circuiting when only a single querier is required. Merge `mockGenericQuerier` into `mockQuerier`. Replace `unwrapMockGenericQuerier` with a visitor pattern. No change in functionality intended. Signed-off-by: Bryan Boreham --- storage/merge_test.go | 284 +++++++++++++++++++++--------------------- 1 file changed, 139 insertions(+), 145 deletions(-) diff --git a/storage/merge_test.go b/storage/merge_test.go index 7619af3c1..a3cba9bb9 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -912,9 +912,23 @@ func TestConcatenatingChunkIterator(t *testing.T) { } type mockQuerier struct { - LabelQuerier + mtx sync.Mutex - toReturn []Series + toReturn []Series // Response for Select. + + closed bool + labelNamesCalls int + labelNamesRequested []labelNameRequest + sortedSeriesRequested []bool + + resp []string // Response for LabelNames and LabelValues; turned into Select response if toReturn is not supplied. + warnings annotations.Annotations + err error +} + +type labelNameRequest struct { + name string + matchers []*labels.Matcher } type seriesByLabel []Series @@ -924,13 +938,47 @@ func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet { - cpy := make([]Series, len(m.toReturn)) - copy(cpy, m.toReturn) + m.mtx.Lock() + defer m.mtx.Unlock() + m.sortedSeriesRequested = append(m.sortedSeriesRequested, sortSeries) + + var ret []Series + if len(m.toReturn) > 0 { + ret = make([]Series, len(m.toReturn)) + copy(ret, m.toReturn) + } else if len(m.resp) > 0 { + ret = make([]Series, 0, len(m.resp)) + for _, l := range m.resp { + ret = append(ret, NewListSeries(labels.FromStrings("test", string(l)), nil)) + } + } if sortSeries { - sort.Sort(seriesByLabel(cpy)) + sort.Sort(seriesByLabel(ret)) } - return NewMockSeriesSet(cpy...) 
+ return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err} +} + +func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + m.mtx.Lock() + m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ + name: name, + matchers: matchers, + }) + m.mtx.Unlock() + return m.resp, m.warnings, m.err +} + +func (m *mockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { + m.mtx.Lock() + m.labelNamesCalls++ + m.mtx.Unlock() + return m.resp, m.warnings, m.err +} + +func (m *mockQuerier) Close() error { + m.closed = true + return nil } type mockChunkQuerier struct { @@ -960,6 +1008,9 @@ func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectH type mockSeriesSet struct { idx int series []Series + + warnings annotations.Annotations + err error } func NewMockSeriesSet(series ...Series) SeriesSet { @@ -970,15 +1021,18 @@ func NewMockSeriesSet(series ...Series) SeriesSet { } func (m *mockSeriesSet) Next() bool { + if m.err != nil { + return false + } m.idx++ return m.idx < len(m.series) } func (m *mockSeriesSet) At() Series { return m.series[m.idx] } -func (m *mockSeriesSet) Err() error { return nil } +func (m *mockSeriesSet) Err() error { return m.err } -func (m *mockSeriesSet) Warnings() annotations.Annotations { return nil } +func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.warnings } type mockChunkSeriesSet struct { idx int @@ -1336,105 +1390,44 @@ func BenchmarkMergeSeriesSet(b *testing.B) { } } -type mockGenericQuerier struct { - mtx sync.Mutex - - closed bool - labelNamesCalls int - labelNamesRequested []labelNameRequest - sortedSeriesRequested []bool - - resp []string - warnings annotations.Annotations - err error -} - -type labelNameRequest struct { - name string - matchers []*labels.Matcher -} - -func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet { - m.mtx.Lock() - m.sortedSeriesRequested = append(m.sortedSeriesRequested, b) - m.mtx.Unlock() - return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err} -} - -func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - m.mtx.Lock() - m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ - name: name, - matchers: matchers, - }) - m.mtx.Unlock() - return m.resp, m.warnings, m.err -} - -func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) { - m.mtx.Lock() - m.labelNamesCalls++ - m.mtx.Unlock() - return m.resp, m.warnings, m.err -} - -func (m *mockGenericQuerier) Close() error { - m.closed = true - return nil -} - -type mockGenericSeriesSet struct { - resp []string - warnings annotations.Annotations - err error - - curr int -} - -func (m *mockGenericSeriesSet) Next() bool { - if m.err != nil { - return false +func visitMockQueriers(t *testing.T, qr Querier, f func(t *testing.T, q *mockQuerier)) int { + count := 0 + switch x := qr.(type) { + case *mockQuerier: + count++ + f(t, x) + case *querierAdapter: + count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f) } - if m.curr >= len(m.resp) { - return false + return count +} + +func visitMockQueriersInGenericQuerier(t *testing.T, g genericQuerier, f func(t *testing.T, q 
*mockQuerier)) int { + count := 0 + switch x := g.(type) { + case *mergeGenericQuerier: + for _, q := range x.queriers { + count += visitMockQueriersInGenericQuerier(t, q, f) + } + case *genericQuerierAdapter: + // Visitor for chunkQuerier not implemented. + count += visitMockQueriers(t, x.q, f) + case *secondaryQuerier: + count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f) } - m.curr++ - return true + return count } -func (m *mockGenericSeriesSet) Err() error { return m.err } -func (m *mockGenericSeriesSet) Warnings() annotations.Annotations { return m.warnings } - -func (m *mockGenericSeriesSet) At() Labels { - return mockLabels(m.resp[m.curr-1]) -} - -type mockLabels string - -func (l mockLabels) Labels() labels.Labels { - return labels.FromStrings("test", string(l)) -} - -func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQuerier { - m, ok := qr.(*mockGenericQuerier) - if !ok { - s, ok := qr.(*secondaryQuerier) - require.True(t, ok, "expected secondaryQuerier got something else") - m, ok = s.genericQuerier.(*mockGenericQuerier) - require.True(t, ok, "expected mockGenericQuerier got something else") - } - return m -} - -func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { +func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { var ( errStorage = errors.New("storage error") warnStorage = errors.New("storage warning") ctx = context.Background() ) for _, tcase := range []struct { - name string - queriers []genericQuerier + name string + primaries []Querier + secondaries []Querier expectedSelectsSeries []labels.Labels expectedLabels []string @@ -1443,10 +1436,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { expectedErrs [4]error }{ { - // NewMergeQuerier will not create a mergeGenericQuerier - // with just one querier inside, but we can test it anyway. 
- name: "one successful primary querier", - queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}}, + name: "one successful primary querier", + primaries: []Querier{&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}}, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), labels.FromStrings("test", "b"), @@ -1455,9 +1446,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "multiple successful primary queriers", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, - &mockGenericQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1468,15 +1459,17 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "one failed primary querier", - queriers: []genericQuerier{&mockGenericQuerier{warnings: nil, err: errStorage}}, + primaries: []Querier{&mockQuerier{warnings: nil, err: errStorage}}, expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage}, }, { name: "one successful primary querier with successful secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1487,10 +1480,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "one successful primary querier with empty response and successful secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{}, warnings: nil, err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}}, + primaries: []Querier{ + &mockQuerier{resp: []string{}, warnings: nil, err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "b"), @@ -1500,19 +1495,23 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "one failed primary querier with successful secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{warnings: nil, err: errStorage}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}}, + primaries: []Querier{ + &mockQuerier{warnings: nil, err: errStorage}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: nil}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: nil}, }, expectedErrs: [4]error{errStorage, 
errStorage, errStorage, errStorage}, }, { name: "one successful primary querier with failed secondaries", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a"}, warnings: nil, err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a"}, warnings: nil, err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1522,9 +1521,11 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, { name: "successful queriers with warnings", - queriers: []genericQuerier{ - &mockGenericQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil}, - &secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}}, + primaries: []Querier{ + &mockQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil}, + }, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}, }, expectedSelectsSeries: []labels.Labels{ labels.FromStrings("test", "a"), @@ -1535,10 +1536,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, } { t.Run(tcase.name, func(t *testing.T) { - q := &mergeGenericQuerier{ - queriers: tcase.queriers, - mergeFn: func(l ...Labels) Labels { return l[0] }, - } + q := NewMergeQuerier(tcase.primaries, tcase.secondaries, func(s ...Series) Series { return s[0] }) t.Run("Select", func(t *testing.T) { res := q.Select(context.Background(), false, nil) @@ -1551,11 +1549,13 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match") require.Equal(t, tcase.expectedSelectsSeries, lbls) - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - // mergeGenericQuerier forces all Selects to be sorted. - require.Equal(t, []bool{true}, m.sortedSeriesRequested) - } + n := visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { + // Single queries should be unsorted; merged queries sorted. + exp := len(tcase.primaries)+len(tcase.secondaries) > 1 + require.Equal(t, []bool{exp}, m.sortedSeriesRequested) + }) + // Check we visited all queriers. 
+ require.Equal(t, len(tcase.primaries)+len(tcase.secondaries), n) }) t.Run("LabelNames", func(t *testing.T) { res, w, err := q.LabelNames(ctx, nil) @@ -1566,11 +1566,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { if err != nil { return } - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - + visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { require.Equal(t, 1, m.labelNamesCalls) - } + }) }) t.Run("LabelValues", func(t *testing.T) { res, w, err := q.LabelValues(ctx, "test", nil) @@ -1581,11 +1579,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { if err != nil { return } - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - + visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { require.Equal(t, []labelNameRequest{{name: "test"}}, m.labelNamesRequested) - } + }) }) t.Run("LabelValuesWithMatchers", func(t *testing.T) { matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") @@ -1597,14 +1593,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) { if err != nil { return } - for _, qr := range q.queriers { - m := unwrapMockGenericQuerier(t, qr) - + visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) { require.Equal(t, []labelNameRequest{ {name: "test"}, {name: "test2", matchers: []*labels.Matcher{matcher}}, }, m.labelNamesRequested) - } + }) }) }) } From 677cdcdcecc3826390461b9574b882ebb9a42143 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 22 Jul 2024 15:01:00 +0100 Subject: [PATCH 56/81] [TEST] Storage: ignore difference between nil and empty We need this for subsequent changes. Signed-off-by: Bryan Boreham --- storage/merge_test.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/storage/merge_test.go b/storage/merge_test.go index a3cba9bb9..dae338a00 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1561,7 +1561,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { res, w, err := q.LabelNames(ctx, nil) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") - require.Equal(t, tcase.expectedLabels, res) + requireEqualSlice(t, tcase.expectedLabels, res) if err != nil { return @@ -1574,7 +1574,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { res, w, err := q.LabelValues(ctx, "test", nil) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") - require.Equal(t, tcase.expectedLabels, res) + requireEqualSlice(t, tcase.expectedLabels, res) if err != nil { return @@ -1588,7 +1588,7 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { res, w, err := q.LabelValues(ctx, "test2", nil, matcher) require.Subset(t, tcase.expectedWarnings, w) require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") - require.Equal(t, tcase.expectedLabels, res) + requireEqualSlice(t, tcase.expectedLabels, res) if err != nil { return @@ -1604,6 +1604,15 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { } } +// Check slice but ignore difference between nil and empty. +func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) { + if len(a) == 0 { + require.Empty(t, b, msgAndArgs...) + } else { + require.Equal(t, a, b, msgAndArgs...) 
+ } +} + type errIterator struct { err error } From 0ae881739bab56f91ad95e7b3d0dc7e470b9ce2e Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 22 Jul 2024 15:02:52 +0100 Subject: [PATCH 57/81] [TEST] Storage: check MergeQuerier with nil primary This test fails on current code. Signed-off-by: Bryan Boreham --- storage/merge_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/storage/merge_test.go b/storage/merge_test.go index dae338a00..488edb2e6 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -1504,6 +1504,25 @@ func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) { }, expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage}, }, + { + name: "nil primary querier with failed secondary", + primaries: nil, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}, + }, + expectedLabels: []string{}, + expectedWarnings: annotations.New().Add(errStorage), + }, + { + name: "nil primary querier with two failed secondaries", + primaries: nil, + secondaries: []Querier{ + &mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}, + &mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}, + }, + expectedLabels: []string{}, + expectedWarnings: annotations.New().Add(errStorage), + }, { name: "one successful primary querier with failed secondaries", primaries: []Querier{ From 90d793e8c5e8f8c9823cfa9942e4e08019b37a6d Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 22 Jul 2024 15:33:07 +0100 Subject: [PATCH 58/81] [BUGFIX] Storage: Single secondary querier errors should be warnings. Signed-off-by: Bryan Boreham --- storage/merge.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/merge.go b/storage/merge.go index 194494b6a..66c4c3ed3 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -51,7 +51,7 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: - return secondaries[0] + return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) @@ -89,7 +89,7 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] case len(primaries) == 0 && len(secondaries) == 1: - return secondaries[0] + return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])} } queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) From 12bd92a25ccc6516f7cda9f53b5798fb5992a6c3 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 22 Jul 2024 15:35:09 +0100 Subject: [PATCH 59/81] [ENHANCEMENT] Storage: Short-circuit merge of single querier with no-op queriers Filter before checking whether there is only one. Signed-off-by: Bryan Boreham --- storage/merge.go | 46 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/storage/merge.go b/storage/merge.go index 66c4c3ed3..2424b26ab 100644 --- a/storage/merge.go +++ b/storage/merge.go @@ -45,8 +45,11 @@ type mergeGenericQuerier struct { // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. 
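// For example (an illustrative sketch, not part of this patch): nil and no-op
// queriers are filtered out first, so a merge that leaves a single real
// primary short-circuits to that querier:
//
//	q := NewMergeQuerier([]Querier{primary}, []Querier{nil, NoopQuerier()}, ChainedSeriesMerge)
//	// q is primary itself; no mergeGenericQuerier is allocated.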
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier { + primaries = filterQueriers(primaries) + secondaries = filterQueriers(secondaries) + switch { - case len(primaries)+len(secondaries) == 0: + case len(primaries) == 0 && len(secondaries) == 0: return noopQuerier{} case len(primaries) == 1 && len(secondaries) == 0: return primaries[0] @@ -56,14 +59,10 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { - if _, ok := q.(noopQuerier); !ok && q != nil { - queriers = append(queriers, newGenericQuerierFrom(q)) - } + queriers = append(queriers, newGenericQuerierFrom(q)) } for _, q := range secondaries { - if _, ok := q.(noopQuerier); !ok && q != nil { - queriers = append(queriers, newSecondaryQuerierFrom(q)) - } + queriers = append(queriers, newSecondaryQuerierFrom(q)) } concurrentSelect := false @@ -77,12 +76,25 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer }} } +func filterQueriers(qs []Querier) []Querier { + ret := make([]Querier, 0, len(qs)) + for _, q := range qs { + if _, ok := q.(noopQuerier); !ok && q != nil { + ret = append(ret, q) + } + } + return ret +} + // NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers. // See NewFanout commentary to learn more about primary vs secondary differences. // // In case of overlaps between the data given by primaries' and secondaries' Selects, merge function will be used. // TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier { + primaries = filterChunkQueriers(primaries) + secondaries = filterChunkQueriers(secondaries) + switch { case len(primaries) == 0 && len(secondaries) == 0: return noopChunkQuerier{} @@ -94,14 +106,10 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries)) for _, q := range primaries { - if _, ok := q.(noopChunkQuerier); !ok && q != nil { - queriers = append(queriers, newGenericQuerierFromChunk(q)) - } + queriers = append(queriers, newGenericQuerierFromChunk(q)) } - for _, querier := range secondaries { - if _, ok := querier.(noopChunkQuerier); !ok && querier != nil { - queriers = append(queriers, newSecondaryQuerierFromChunk(querier)) - } + for _, q := range secondaries { + queriers = append(queriers, newSecondaryQuerierFromChunk(q)) } concurrentSelect := false @@ -115,6 +123,16 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica }} } +func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier { + ret := make([]ChunkQuerier, 0, len(qs)) + for _, q := range qs { + if _, ok := q.(noopChunkQuerier); !ok && q != nil { + ret = append(ret, q) + } + } + return ret +} + // Select returns a set of series that matches the given label matchers. 
 func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
 	seriesSets := make([]genericSeriesSet, 0, len(q.queriers))

From cc7dcf5afea759263ca0fa555f70741b76ce4df6 Mon Sep 17 00:00:00 2001
From: Bryan Boreham
Date: Mon, 22 Jul 2024 16:17:35 +0100
Subject: [PATCH 60/81] [DOCS] Querying basics: explain range and instant
 queries

I often see people ask questions that indicate they don't understand
this point, and launching into "instant vector" and "range vector" is
likely to point them in the wrong direction.

Remove the admonishment that the reader mustn't confuse these things.

Remove mention of "inferred sample timestamps" that is never explained.

Signed-off-by: Bryan Boreham
---
 docs/querying/basics.md | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/docs/querying/basics.md b/docs/querying/basics.md
index 1c72adb3e..304c9f07d 100644
--- a/docs/querying/basics.md
+++ b/docs/querying/basics.md
@@ -8,9 +8,15 @@ sort_rank: 1
 
 Prometheus provides a functional query language called PromQL (Prometheus Query
 Language) that lets the user select and aggregate time series data in real
-time. The result of an expression can either be shown as a graph, viewed as
-tabular data in Prometheus's expression browser, or consumed by external
-systems via the [HTTP API](api.md).
+time.
+
+When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
+or a _range query_, evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
+in both cases; a range query is just like an instant query run multiple times at different timestamps.
+
+In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
+
+Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).
 
 ## Examples
 
@@ -94,9 +100,7 @@ Examples:
 
 ## Time series selectors
 
-Time series selectors are responsible for selecting the times series and raw or inferred sample timestamps and values.
-
-Time series *selectors* are not to be confused with higher level concept of instant and range *queries* that can execute the time series *selectors*. A higher level instant query would evaluate the given selector at one point in time, however the range query would evaluate the selector at multiple different times in between a minimum and maximum timestamp at regular steps.
+These are the basic building-blocks that instruct PromQL what data to fetch.
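To make the instant/range distinction above concrete, here is a sketch using the Go client from client_golang (the server address and expression are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		panic(err)
	}
	promAPI := v1.NewAPI(client)
	ctx := context.Background()

	// Instant query: one evaluation, at a single point in time.
	instant, _, err := promAPI.Query(ctx, `rate(http_requests_total[5m])`, time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(instant)

	// Range query: the same expression evaluated at every step between start and end.
	ranged, _, err := promAPI.QueryRange(ctx, `rate(http_requests_total[5m])`, v1.Range{
		Start: time.Now().Add(-time.Hour),
		End:   time.Now(),
		Step:  time.Minute,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ranged)
}
```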
### Instant vector selectors From c037a3df844d15b5e38be7eaa28f842c5ee07ee2 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Mon, 22 Jul 2024 16:34:42 +0100 Subject: [PATCH 61/81] lint Signed-off-by: Bryan Boreham --- storage/merge_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/merge_test.go b/storage/merge_test.go index 488edb2e6..b145743c8 100644 --- a/storage/merge_test.go +++ b/storage/merge_test.go @@ -949,7 +949,7 @@ func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, } else if len(m.resp) > 0 { ret = make([]Series, 0, len(m.resp)) for _, l := range m.resp { - ret = append(ret, NewListSeries(labels.FromStrings("test", string(l)), nil)) + ret = append(ret, NewListSeries(labels.FromStrings("test", l), nil)) } } if sortSeries { From be7a4c9b83a9f074f823d12e3d58338407fe76a1 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Fri, 26 Jul 2024 09:49:57 +0200 Subject: [PATCH 62/81] Ignore stale histograms for counter reset detection The histogram stats decoder keeps track of the last seen histogram sample in order to properly detect counter resets. We are seeing an issue where a histogram with UnknownResetHint gets treated as a counter reset when it follows a stale histogram sample. I believe that this is incorrect since stale samples should be completely ignored in PromQL. As a result, they should not be stored in the histogram stats iterator and the counter reset detection needs to be done against the last non-stale sample. Signed-off-by: Filip Petkovski --- promql/histogram_stats_iterator.go | 2 - promql/histogram_stats_iterator_test.go | 123 +++++++++++++++--------- tsdb/tsdbutil/histogram.go | 10 +- 3 files changed, 84 insertions(+), 51 deletions(-) diff --git a/promql/histogram_stats_iterator.go b/promql/histogram_stats_iterator.go index dfafea5f8..0a5f67ae7 100644 --- a/promql/histogram_stats_iterator.go +++ b/promql/histogram_stats_iterator.go @@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi var t int64 t, f.currentH = f.Iterator.AtHistogram(f.currentH) if value.IsStaleNaN(f.currentH.Sum) { - f.setLastH(f.currentH) h = &histogram.Histogram{Sum: f.currentH.Sum} return t, h } @@ -77,7 +76,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) var t int64 t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH) if value.IsStaleNaN(f.currentFH.Sum) { - f.setLastFH(f.currentFH) return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum} } diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go index b71a9d602..d5c081348 100644 --- a/promql/histogram_stats_iterator_test.go +++ b/promql/histogram_stats_iterator_test.go @@ -14,62 +14,99 @@ package promql import ( + "fmt" + "math" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/tsdbutil" ) func TestHistogramStatsDecoding(t *testing.T) { - histograms := []*histogram.Histogram{ - tsdbutil.GenerateTestHistogram(0), - tsdbutil.GenerateTestHistogram(1), - tsdbutil.GenerateTestHistogram(2), - tsdbutil.GenerateTestHistogram(2), - } - histograms[0].CounterResetHint = histogram.NotCounterReset - histograms[1].CounterResetHint = histogram.UnknownCounterReset - histograms[2].CounterResetHint = histogram.CounterReset - 
histograms[3].CounterResetHint = histogram.UnknownCounterReset - - expectedHints := []histogram.CounterResetHint{ - histogram.NotCounterReset, - histogram.NotCounterReset, - histogram.CounterReset, - histogram.NotCounterReset, + cases := []struct { + name string + histograms []*histogram.Histogram + expectedHints []histogram.CounterResetHint + }{ + { + name: "unknown counter reset triggers detection", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + tsdbutil.GenerateTestHistogramWithHint(2, histogram.CounterReset), + tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.NotCounterReset, + histogram.CounterReset, + histogram.NotCounterReset, + }, + }, + { + name: "stale sample before unknown reset hint", + histograms: []*histogram.Histogram{ + tsdbutil.GenerateTestHistogramWithHint(0, histogram.NotCounterReset), + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + {Sum: math.Float64frombits(value.StaleNaN)}, + tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset), + }, + expectedHints: []histogram.CounterResetHint{ + histogram.NotCounterReset, + histogram.NotCounterReset, + histogram.UnknownCounterReset, + histogram.NotCounterReset, + }, + }, } - t.Run("histogram_stats", func(t *testing.T) { - decodedStats := make([]*histogram.Histogram, 0) - statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i := 0; i < len(histograms); i++ { - require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) - require.Equal(t, histograms[i].Count, decodedStats[i].Count) - require.Equal(t, histograms[i].Sum, decodedStats[i].Sum) - } - }) - t.Run("float_histogram_stats", func(t *testing.T) { - decodedStats := make([]*histogram.FloatHistogram, 0) - statsIterator := NewHistogramStatsIterator(newHistogramSeries(histograms).Iterator(nil)) - for statsIterator.Next() != chunkenc.ValNone { - _, h := statsIterator.AtFloatHistogram(nil) - decodedStats = append(decodedStats, h) - } - for i := 0; i < len(histograms); i++ { - fh := histograms[i].ToFloat(nil) - require.Equal(t, expectedHints[i], decodedStats[i].CounterResetHint) - require.Equal(t, fh.Count, decodedStats[i].Count) - require.Equal(t, fh.Sum, decodedStats[i].Sum) - } - }) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Run("histogram_stats", func(t *testing.T) { + decodedStats := make([]*histogram.Histogram, 0) + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint, fmt.Sprintf("mismatch in counter reset hint for histogram %d", i)) + h := tc.histograms[i] + if value.IsStaleNaN(h.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, uint64(0), decodedStats[i].Count) + } else { + require.Equal(t, tc.histograms[i].Count, decodedStats[i].Count) + require.Equal(t, tc.histograms[i].Sum, decodedStats[i].Sum) + } + } + }) + t.Run("float_histogram_stats", 
func(t *testing.T) { + decodedStats := make([]*histogram.FloatHistogram, 0) + statsIterator := NewHistogramStatsIterator(newHistogramSeries(tc.histograms).Iterator(nil)) + for statsIterator.Next() != chunkenc.ValNone { + _, h := statsIterator.AtFloatHistogram(nil) + decodedStats = append(decodedStats, h) + } + for i := 0; i < len(tc.histograms); i++ { + require.Equal(t, tc.expectedHints[i], decodedStats[i].CounterResetHint) + fh := tc.histograms[i].ToFloat(nil) + if value.IsStaleNaN(fh.Sum) { + require.True(t, value.IsStaleNaN(decodedStats[i].Sum)) + require.Equal(t, float64(0), decodedStats[i].Count) + } else { + require.Equal(t, fh.Count, decodedStats[i].Count) + require.Equal(t, fh.Sum, decodedStats[i].Sum) + } + } + }) + }) + } } type histogramSeries struct { diff --git a/tsdb/tsdbutil/histogram.go b/tsdb/tsdbutil/histogram.go index 3c7349cf7..ce934a638 100644 --- a/tsdb/tsdbutil/histogram.go +++ b/tsdb/tsdbutil/histogram.go @@ -30,12 +30,10 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { return r } -func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram { - hs := GenerateTestHistograms(n) - for i := range hs { - hs[i].CounterResetHint = histogram.UnknownCounterReset - } - return hs +func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram { + h := GenerateTestHistogram(n) + h.CounterResetHint = hint + return h } // GenerateTestHistogram but it is up to the user to set any known counter reset hint. From 6e89250a5d937485a140c6ba6dcdb35d2db51cd0 Mon Sep 17 00:00:00 2001 From: Bryan Boreham Date: Fri, 26 Jul 2024 09:49:25 +0100 Subject: [PATCH 63/81] Revert "Chunked remote read: close the querier earlier" Believed to trigger segmentation faults due to memory-mapped block data still being accessed by iterators after the querier is closed. Signed-off-by: Bryan Boreham --- storage/remote/read_handler.go | 53 ++++++++++++++-------------------- 1 file changed, 21 insertions(+), 32 deletions(-) diff --git a/storage/remote/read_handler.go b/storage/remote/read_handler.go index 2a00ce897..ffc64c9c3 100644 --- a/storage/remote/read_handler.go +++ b/storage/remote/read_handler.go @@ -202,16 +202,34 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re return err } - chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers) - if err := chunks.Err(); err != nil { + querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) + if err != nil { return err } + defer func() { + if err := querier.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + } + }() + + var hints *storage.SelectHints + if query.Hints != nil { + hints = &storage.SelectHints{ + Start: query.Hints.StartMs, + End: query.Hints.EndMs, + Step: query.Hints.StepMs, + Func: query.Hints.Func, + Grouping: query.Hints.Grouping, + Range: query.Hints.RangeMs, + By: query.Hints.By, + } + } ws, err := StreamChunkedReadResponses( NewChunkedWriter(w, f), int64(i), // The streaming API has to provide the series sorted. - chunks, + querier.Select(ctx, true, hints, filteredMatchers...), sortedExternalLabels, h.remoteReadMaxBytesInFrame, h.marshalPool, @@ -236,35 +254,6 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } } -// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet, -// encapsulating the operation in its own function to ensure timely release of -// the querier resources. 
-func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet { - querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) - if err != nil { - return storage.ErrChunkSeriesSet(err) - } - defer func() { - if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) - } - }() - - var hints *storage.SelectHints - if query.Hints != nil { - hints = &storage.SelectHints{ - Start: query.Hints.StartMs, - End: query.Hints.EndMs, - Step: query.Hints.StepMs, - Func: query.Hints.Func, - Grouping: query.Hints.Grouping, - Range: query.Hints.RangeMs, - By: query.Hints.By, - } - } - return querier.Select(ctx, true, hints, filteredMatchers...) -} - // filterExtLabelsFromMatchers change equality matchers which match external labels // to a matcher that looks for an empty label, // as that label should not be present in the storage. From d4f098ae80fb276153efc757e373c813163da0e8 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Fri, 26 Jul 2024 14:55:39 +0200 Subject: [PATCH 64/81] Fix relabel.Regexp zero value marshalling (#14517) Signed-off-by: Marco Pracucci --- model/relabel/relabel.go | 4 ++++ model/relabel/relabel_test.go | 13 +++++++++++++ 2 files changed, 17 insertions(+) diff --git a/model/relabel/relabel.go b/model/relabel/relabel.go index 4f33edda4..a88046596 100644 --- a/model/relabel/relabel.go +++ b/model/relabel/relabel.go @@ -213,6 +213,10 @@ func (re Regexp) IsZero() bool { // String returns the original string used to compile the regular expression. func (re Regexp) String() string { + if re.Regexp == nil { + return "" + } + str := re.Regexp.String() // Trim the anchor `^(?:` prefix and `)$` suffix. 
return str[4 : len(str)-2] diff --git a/model/relabel/relabel_test.go b/model/relabel/relabel_test.go index 0f11f7068..fc9952134 100644 --- a/model/relabel/relabel_test.go +++ b/model/relabel/relabel_test.go @@ -900,3 +900,16 @@ action: replace }) } } + +func TestRegexp_ShouldMarshalAndUnmarshalZeroValue(t *testing.T) { + var zero Regexp + + marshalled, err := yaml.Marshal(&zero) + require.NoError(t, err) + require.Equal(t, "null\n", string(marshalled)) + + var unmarshalled Regexp + err = yaml.Unmarshal(marshalled, &unmarshalled) + require.NoError(t, err) + require.Nil(t, unmarshalled.Regexp) +} From fe12924638d433c99b51f9acb1d7ebb9c1f40881 Mon Sep 17 00:00:00 2001 From: Kushal shukla <85934954+kushalShukla-web@users.noreply.github.com> Date: Mon, 29 Jul 2024 07:28:08 -0400 Subject: [PATCH 65/81] promtool: JUnit-Format XML Test Results (#14506) * Junit compatible output Signed-off-by: Kushal Shukla --- cmd/promtool/main.go | 7 ++- cmd/promtool/unittest.go | 40 +++++++++++++---- cmd/promtool/unittest_test.go | 50 +++++++++++++++++++++ docs/command-line/promtool.md | 9 ++++ util/junitxml/junitxml.go | 81 ++++++++++++++++++++++++++++++++++ util/junitxml/junitxml_test.go | 66 +++++++++++++++++++++++++++ 6 files changed, 243 insertions(+), 10 deletions(-) create mode 100644 util/junitxml/junitxml.go create mode 100644 util/junitxml/junitxml_test.go diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index e1d275e97..1c8e1dd1c 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -204,6 +204,7 @@ func main() { pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap() testCmd := app.Command("test", "Unit testing.") + junitOutFile := testCmd.Flag("junit", "File path to store JUnit XML test results.").OpenFile(os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644) testRulesCmd := testCmd.Command("rules", "Unit tests for rules.") testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings() testRulesFiles := testRulesCmd.Arg( @@ -378,7 +379,11 @@ func main() { os.Exit(QueryLabels(serverURL, httpRoundTripper, *queryLabelsMatch, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p)) case testRulesCmd.FullCommand(): - os.Exit(RulesUnitTest( + results := io.Discard + if *junitOutFile != nil { + results = *junitOutFile + } + os.Exit(RulesUnitTestResult(results, promqltest.LazyLoaderOpts{ EnableAtModifier: true, EnableNegativeOffset: true, diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 5451c5296..7030635d1 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -18,6 +18,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "os" "path/filepath" "sort" @@ -29,9 +30,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/grafana/regexp" "github.com/nsf/jsondiff" - "github.com/prometheus/common/model" "gopkg.in/yaml.v2" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" @@ -39,12 +41,18 @@ import ( "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/junitxml" ) // RulesUnitTest does unit testing of rules based on the unit testing files provided. // More info about the file format can be found in the docs. 
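// As an illustration (file names are hypothetical), the new JUnit flag wired
// up in cmd/promtool/main.go can be exercised with:
//
//	promtool test --junit results.xml rules ./testdata/rules-unit-tests.yml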
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { + return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...) +} + +func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int { failed := false + junit := &junitxml.JUnitXML{} var run *regexp.Regexp if runStrings != nil { @@ -52,7 +60,7 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif } for _, f := range files { - if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil { + if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil { fmt.Fprintln(os.Stderr, " FAILED:") for _, e := range errs { fmt.Fprintln(os.Stderr, e.Error()) @@ -64,25 +72,30 @@ func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, dif } fmt.Println() } + err := junit.WriteXML(results) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to write JUnit XML: %s\n", err) + } if failed { return failureExitCode } return successExitCode } -func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error { - fmt.Println("Unit Testing: ", filename) - +func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error { b, err := os.ReadFile(filename) if err != nil { + ts.Abort(err) return []error{err} } var unitTestInp unitTestFile if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil { + ts.Abort(err) return []error{err} } if err := resolveAndGlobFilepaths(filepath.Dir(filename), &unitTestInp); err != nil { + ts.Abort(err) return []error{err} } @@ -91,29 +104,38 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg } evalInterval := time.Duration(unitTestInp.EvaluationInterval) - + ts.Settime(time.Now().Format("2006-01-02T15:04:05")) // Giving number for groups mentioned in the file for ordering. // Lower number group should be evaluated before higher number group. groupOrderMap := make(map[string]int) for i, gn := range unitTestInp.GroupEvalOrder { if _, ok := groupOrderMap[gn]; ok { - return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)} + err := fmt.Errorf("group name repeated in evaluation order: %s", gn) + ts.Abort(err) + return []error{err} } groupOrderMap[gn] = i } // Testing. var errs []error - for _, t := range unitTestInp.Tests { + for i, t := range unitTestInp.Tests { if !matchesRun(t.TestGroupName, run) { continue } - + testname := t.TestGroupName + if testname == "" { + testname = fmt.Sprintf("unnamed#%d", i) + } + tc := ts.Case(testname) if t.Interval == 0 { t.Interval = unitTestInp.EvaluationInterval } ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...) if ers != nil { + for _, e := range ers { + tc.Fail(e.Error()) + } errs = append(errs, ers...) 
} } diff --git a/cmd/promtool/unittest_test.go b/cmd/promtool/unittest_test.go index 2dbd5a4e5..9bbac28e9 100644 --- a/cmd/promtool/unittest_test.go +++ b/cmd/promtool/unittest_test.go @@ -14,11 +14,15 @@ package main import ( + "bytes" + "encoding/xml" + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/promql/promqltest" + "github.com/prometheus/prometheus/util/junitxml" ) func TestRulesUnitTest(t *testing.T) { @@ -125,13 +129,59 @@ func TestRulesUnitTest(t *testing.T) { want: 0, }, } + reuseFiles := []string{} + reuseCount := [2]int{} for _, tt := range tests { + if (tt.queryOpts == promqltest.LazyLoaderOpts{ + EnableNegativeOffset: true, + } || tt.queryOpts == promqltest.LazyLoaderOpts{ + EnableAtModifier: true, + }) { + reuseFiles = append(reuseFiles, tt.args.files...) + reuseCount[tt.want] += len(tt.args.files) + } t.Run(tt.name, func(t *testing.T) { if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want { t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) } }) } + t.Run("Junit xml output ", func(t *testing.T) { + var buf bytes.Buffer + if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 { + t.Errorf("RulesUnitTestResults() = %v, want 1", got) + } + var test junitxml.JUnitXML + output := buf.Bytes() + err := xml.Unmarshal(output, &test) + if err != nil { + fmt.Println("error in decoding XML:", err) + return + } + var total int + var passes int + var failures int + var cases int + total = len(test.Suites) + if total != len(reuseFiles) { + t.Errorf("JUnit output had %d testsuite elements; expected %d\n", total, len(reuseFiles)) + } + + for _, i := range test.Suites { + if i.FailureCount == 0 { + passes++ + } else { + failures++ + } + cases += len(i.Cases) + } + if total != passes+failures { + t.Errorf("JUnit output mismatch: Total testsuites (%d) does not equal the sum of passes (%d) and failures (%d).", total, passes, failures) + } + if cases < total { + t.Errorf("JUnit output had %d suites without test cases\n", total-cases) + } + }) } func TestRulesUnitTestRun(t *testing.T) { diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 443cd3f0c..6bb80169a 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -442,6 +442,15 @@ Unit testing. +#### Flags + +| Flag | Description | +| --- | --- | +| --junit | File path to store JUnit XML test results. | + + + + ##### `promtool test rules` Unit tests for rules. diff --git a/util/junitxml/junitxml.go b/util/junitxml/junitxml.go new file mode 100644 index 000000000..14e4b6dba --- /dev/null +++ b/util/junitxml/junitxml.go @@ -0,0 +1,81 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package junitxml + +import ( + "encoding/xml" + "io" +) + +type JUnitXML struct { + XMLName xml.Name `xml:"testsuites"` + Suites []*TestSuite `xml:"testsuite"` +} + +type TestSuite struct { + Name string `xml:"name,attr"` + TestCount int `xml:"tests,attr"` + FailureCount int `xml:"failures,attr"` + ErrorCount int `xml:"errors,attr"` + SkippedCount int `xml:"skipped,attr"` + Timestamp string `xml:"timestamp,attr"` + Cases []*TestCase `xml:"testcase"` +} +type TestCase struct { + Name string `xml:"name,attr"` + Failures []string `xml:"failure,omitempty"` + Error string `xml:"error,omitempty"` +} + +func (j *JUnitXML) WriteXML(h io.Writer) error { + return xml.NewEncoder(h).Encode(j) +} + +func (j *JUnitXML) Suite(name string) *TestSuite { + ts := &TestSuite{Name: name} + j.Suites = append(j.Suites, ts) + return ts +} + +func (ts *TestSuite) Fail(f string) { + ts.FailureCount++ + curt := ts.lastCase() + curt.Failures = append(curt.Failures, f) +} + +func (ts *TestSuite) lastCase() *TestCase { + if len(ts.Cases) == 0 { + ts.Case("unknown") + } + return ts.Cases[len(ts.Cases)-1] +} + +func (ts *TestSuite) Case(name string) *TestSuite { + j := &TestCase{ + Name: name, + } + ts.Cases = append(ts.Cases, j) + ts.TestCount++ + return ts +} + +func (ts *TestSuite) Settime(name string) { + ts.Timestamp = name +} + +func (ts *TestSuite) Abort(e error) { + ts.ErrorCount++ + curt := ts.lastCase() + curt.Error = e.Error() +} diff --git a/util/junitxml/junitxml_test.go b/util/junitxml/junitxml_test.go new file mode 100644 index 000000000..ad4d0293d --- /dev/null +++ b/util/junitxml/junitxml_test.go @@ -0,0 +1,66 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package junitxml
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"testing"
+)
+
+func TestJunitOutput(t *testing.T) {
+	var buf bytes.Buffer
+	var test JUnitXML
+	x := FakeTestSuites()
+	if err := x.WriteXML(&buf); err != nil {
+		t.Fatalf("Failed to encode XML: %v", err)
+	}
+
+	output := buf.Bytes()
+
+	err := xml.Unmarshal(output, &test)
+	if err != nil {
+		t.Errorf("Unmarshal failed with error: %v", err)
+	}
+	var total int
+	var cases int
+	total = len(test.Suites)
+	if total != 3 {
+		t.Errorf("JUnit output had %d testsuite elements; expected 3\n", total)
+	}
+	for _, i := range test.Suites {
+		cases += len(i.Cases)
+	}
+
+	if cases != 7 {
+		t.Errorf("JUnit output had %d test cases; expected 7\n", cases)
+	}
+}
+
+func FakeTestSuites() *JUnitXML {
+	ju := &JUnitXML{}
+	good := ju.Suite("all good")
+	good.Case("alpha")
+	good.Case("beta")
+	good.Case("gamma")
+	mixed := ju.Suite("mixed")
+	mixed.Case("good")
+	bad := mixed.Case("bad")
+	bad.Fail("once")
+	bad.Fail("twice")
+	mixed.Case("ugly").Abort(errors.New("buggy"))
+	ju.Suite("fast").Fail("fail early")
+	return ju
+}
From 2cd97c61e02ac9cf50e0fa4a72bbc61f8e128b8b Mon Sep 17 00:00:00 2001
From: Filip Petkovski
Date: Mon, 29 Jul 2024 14:53:32 +0200
Subject: [PATCH 66/81] Add more test cases

Signed-off-by: Filip Petkovski
---
 promql/histogram_stats_iterator_test.go | 33 +++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/promql/histogram_stats_iterator_test.go b/promql/histogram_stats_iterator_test.go
index d5c081348..7a2953d3e 100644
--- a/promql/histogram_stats_iterator_test.go
+++ b/promql/histogram_stats_iterator_test.go
@@ -63,6 +63,39 @@ func TestHistogramStatsDecoding(t *testing.T) {
 				histogram.NotCounterReset,
 			},
 		},
+		{
+			name: "unknown counter reset at the beginning",
+			histograms: []*histogram.Histogram{
+				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+			},
+			expectedHints: []histogram.CounterResetHint{
+				histogram.NotCounterReset,
+			},
+		},
+		{
+			name: "detect real counter reset",
+			histograms: []*histogram.Histogram{
+				tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
+				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+			},
+			expectedHints: []histogram.CounterResetHint{
+				histogram.NotCounterReset,
+				histogram.CounterReset,
+			},
+		},
+		{
+			name: "detect real counter reset after stale NaN",
+			histograms: []*histogram.Histogram{
+				tsdbutil.GenerateTestHistogramWithHint(2, histogram.UnknownCounterReset),
+				{Sum: math.Float64frombits(value.StaleNaN)},
+				tsdbutil.GenerateTestHistogramWithHint(1, histogram.UnknownCounterReset),
+			},
+			expectedHints: []histogram.CounterResetHint{
+				histogram.NotCounterReset,
+				histogram.UnknownCounterReset,
+				histogram.CounterReset,
+			},
+		},
 	}
 
 	for _, tc := range cases {
From b7f2f3c3ac90f2347de6112c185a4e470e7ae8a6 Mon Sep 17 00:00:00 2001
From: Oleg Zaytsev
Date: Tue, 30 Jul 2024 10:19:56 +0200
Subject: [PATCH 67/81] Add BenchmarkLoadRealWLs

This benchmark runs on real WLs rather than fake generated ones.
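
An example invocation (the paths here are illustrative; the environment
variables are the ones read by the benchmark, and the profile file is
optional):

    BENCHMARK_LOAD_REAL_WLS_DIR=/path/to/data \
    BENCHMARK_LOAD_REAL_WLS_PROFILE=cpu.pprof \
    go test -run '^$' -bench BenchmarkLoadRealWLs ./tsdb/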
Signed-off-by: Oleg Zaytsev
---
 tsdb/head_test.go | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index c192c8a07..09927c23c 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -23,6 +23,7 @@ import (
 	"path"
 	"path/filepath"
 	"reflect"
+	"runtime/pprof"
 	"sort"
 	"strconv"
 	"strings"
@@ -89,6 +90,43 @@ func newTestHeadWithOptions(t testing.TB, compressWAL wlog.CompressionType, opts
 	return h, wal
 }
 
+// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set.
+// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located.
+// Optionally, BENCHMARK_LOAD_REAL_WLS_PROFILE can be set to a file path to write a CPU profile.
+func BenchmarkLoadRealWLs(b *testing.B) {
+	dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR")
+	if dir == "" {
+		b.Skip()
+	}
+
+	profileFile := os.Getenv("BENCHMARK_LOAD_REAL_WLS_PROFILE")
+	if profileFile != "" {
+		b.Logf("Will profile in %s", profileFile)
+		f, err := os.Create(profileFile)
+		require.NoError(b, err)
+		b.Cleanup(func() { f.Close() })
+		require.NoError(b, pprof.StartCPUProfile(f))
+		b.Cleanup(pprof.StopCPUProfile)
+	}
+
+	wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone)
+	require.NoError(b, err)
+	b.Cleanup(func() { wal.Close() })
+
+	wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone)
+	require.NoError(b, err)
+	b.Cleanup(func() { wbl.Close() })
+
+	// Load the WAL.
+	for i := 0; i < b.N; i++ {
+		opts := DefaultHeadOptions()
+		opts.ChunkDirRoot = dir
+		h, err := NewHead(nil, nil, wal, wbl, opts, nil)
+		require.NoError(b, err)
+		require.NoError(b, h.Init(0))
+	}
+}
+
 func BenchmarkCreateSeries(b *testing.B) {
 	series := genSeries(b.N, 10, 0, 0)
 	h, _ := newTestHead(b, 10000, wlog.CompressionNone, false)
From d8e1b6bdfd3c8cd02a38b21386453dac9b14da1b Mon Sep 17 00:00:00 2001
From: Oleg Zaytsev
Date: Tue, 30 Jul 2024 10:20:29 +0200
Subject: [PATCH 68/81] Store mmMaxTime in same field as seriesShard

We don't use seriesShard during DB initialization, so we can use the
same 8 bytes to store mmMaxTime and save them for the rest of the
lifetime of the database.

This doesn't affect CPU performance.

Signed-off-by: Oleg Zaytsev
---
 tsdb/head.go      | 46 +++++++++++++++++++++++++++++++++++-----------
 tsdb/head_read.go |  2 +-
 tsdb/head_wal.go  | 13 ++++++++-----
 3 files changed, 44 insertions(+), 17 deletions(-)

diff --git a/tsdb/head.go b/tsdb/head.go
index b7bfaa0fd..1659e57a4 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -178,6 +178,7 @@ type HeadOptions struct {
 	WALReplayConcurrency int
 
 	// EnableSharding enables ShardedPostings() support in the Head.
+	// EnableSharding is temporarily disabled during Init().
 	EnableSharding bool
 }
 
@@ -609,7 +610,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
 // Init loads data from the write ahead log and prepares the head for writes.
 // It should be called before using an appender so that it
 // limits the ingested samples to the head min valid time.
-func (h *Head) Init(minValidTime int64) error {
+func (h *Head) Init(minValidTime int64) (err error) {
 	h.minValidTime.Store(minValidTime)
 	defer func() {
 		h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
@@ -623,6 +624,24 @@ func (h *Head) Init(minValidTime int64) error {
 		}
 	}()
 
+	// If sharding is enabled, disable it while initializing, and calculate the shards later.
+	// We're going to use that field for other purposes during WAL replay,
+	// so we don't want to waste time on calculating the shard that we're going to lose anyway.
+	if h.opts.EnableSharding {
+		h.opts.EnableSharding = false
+		defer func() {
+			if err == nil {
+				h.opts.EnableSharding = true
+				// No locking is needed here as nobody should be writing while we're in Init.
+				for _, stripe := range h.series.series {
+					for _, s := range stripe {
+						s.shardHashOrMemoryMappedMaxTime = labels.StableHash(s.lset)
+					}
+				}
+			}
+		}()
+	}
+
 	level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any")
 	start := time.Now()
 
@@ -683,7 +702,6 @@ func (h *Head) Init(minValidTime int64) error {
 		mmappedChunks    map[chunks.HeadSeriesRef][]*mmappedChunk
 		oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
 		lastMmapRef      chunks.ChunkDiskMapperRef
-		err              error
 
 		mmapChunkReplayDuration time.Duration
 	)
@@ -2068,9 +2086,11 @@ type memSeries struct {
 	ref  chunks.HeadSeriesRef
 	meta *metadata.Metadata
 
-	// Series labels hash to use for sharding purposes. The value is always 0 when sharding has not
-	// been explicitly enabled in TSDB.
-	shardHash uint64
+	// Series labels hash to use for sharding purposes.
+	// The value is always 0 when sharding has not been explicitly enabled in TSDB.
+	// During WAL replay, the value stored here is the max time of any mmapped chunk,
+	// and the shard hash is re-calculated after WAL replay is complete.
+	shardHashOrMemoryMappedMaxTime uint64
 
 	// Everything after here should only be accessed with the lock held.
 	sync.Mutex
@@ -2095,8 +2115,6 @@ type memSeries struct {
 
 	ooo *memSeriesOOOFields
 
-	mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay.
-
 	nextAt                           int64 // Timestamp at which to cut the next chunk.
 	histogramChunkHasComputedEndTime bool  // True if nextAt has been predicted for the current histograms chunk; false otherwise.
 	pendingCommit                    bool  // Whether there are samples waiting to be committed to this series.
@@ -2127,10 +2145,10 @@ type memSeriesOOOFields struct {
 
 func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled bool) *memSeries {
 	s := &memSeries{
-		lset:      lset,
-		ref:       id,
-		nextAt:    math.MinInt64,
-		shardHash: shardHash,
+		lset:                           lset,
+		ref:                            id,
+		nextAt:                         math.MinInt64,
+		shardHashOrMemoryMappedMaxTime: shardHash,
 	}
 	if !isolationDisabled {
 		s.txs = newTxRing(0)
@@ -2218,6 +2236,12 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD
 	return removedInOrder + removedOOO
 }
 
+// shardHash returns the shard hash of the series, only available after WAL replay.
+func (s *memSeries) shardHash() uint64 { return s.shardHashOrMemoryMappedMaxTime }
+
+// mmMaxTime returns the max time of any mmapped chunk in the series, only available during WAL replay.
+func (s *memSeries) mmMaxTime() int64 { return int64(s.shardHashOrMemoryMappedMaxTime) }
+
 // cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after
 // acquiring lock.
 func (s *memSeries) cleanupAppendIDsBelow(bound uint64) {
diff --git a/tsdb/head_read.go b/tsdb/head_read.go
index 9ba8785ad..3a50f316b 100644
--- a/tsdb/head_read.go
+++ b/tsdb/head_read.go
@@ -170,7 +170,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou
 		}
 
 		// Check if the series belong to the shard.
-		if s.shardHash%shardCount != shardIndex {
+		if s.shardHash()%shardCount != shardIndex {
 			continue
 		}
 
diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go
index 787cb7c26..2852709a0 100644
--- a/tsdb/head_wal.go
+++ b/tsdb/head_wal.go
@@ -435,6 +435,8 @@ Outer:
 	return nil
 }
 
+func minInt64() int64 { return math.MinInt64 }
+
 // resetSeriesWithMMappedChunks is only used during the WAL replay.
 func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) {
 	if mSeries.ref != walSeriesRef {
@@ -481,10 +483,11 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
 	}
 	// Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject.
 	if len(mmc) == 0 {
-		mSeries.mmMaxTime = math.MinInt64
+		mSeries.shardHashOrMemoryMappedMaxTime = uint64(minInt64())
 	} else {
-		mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime
-		h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime)
+		mmMaxTime := mmc[len(mmc)-1].maxTime
+		mSeries.shardHashOrMemoryMappedMaxTime = uint64(mmMaxTime)
+		h.updateMinMaxTime(mmc[0].minTime, mmMaxTime)
 	}
 	if len(oooMmc) != 0 {
 		// Mint and maxt can be in any chunk, they are not sorted.
@@ -585,7 +588,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 				unknownRefs++
 				continue
 			}
-			if s.T <= ms.mmMaxTime {
+			if s.T <= ms.mmMaxTime() {
 				continue
 			}
 			if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated {
@@ -614,7 +617,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
 				unknownHistogramRefs++
 				continue
 			}
-			if s.t <= ms.mmMaxTime {
+			if s.t <= ms.mmMaxTime() {
 				continue
 			}
 			var chunkCreated bool
From 0300ad58a97098674ca4757c79a74a05e9c33322 Mon Sep 17 00:00:00 2001
From: Oleg Zaytsev
Date: Tue, 30 Jul 2024 11:31:31 +0200
Subject: [PATCH 69/81] Revert the option regardless of error

Signed-off-by: Oleg Zaytsev
---
 tsdb/head.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tsdb/head.go b/tsdb/head.go
index 1659e57a4..9d81b24ae 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -630,8 +630,8 @@ func (h *Head) Init(minValidTime int64) (err error) {
 	if h.opts.EnableSharding {
 		h.opts.EnableSharding = false
 		defer func() {
+			h.opts.EnableSharding = true
 			if err == nil {
-				h.opts.EnableSharding = true
 				// No locking is needed here as nobody should be writing while we're in Init.
 				for _, stripe := range h.series.series {
 					for _, s := range stripe {
From 6cef8698c27b99263efcbe5025846187cf4358f7 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 30 Jul 2024 13:30:49 +0200
Subject: [PATCH 70/81] build(deps-dev): bump @lezer/generator from 1.7.0 to
 1.7.1 in /web/ui (#14382)

Bumps [@lezer/generator](https://github.com/lezer-parser/generator) from 1.7.0 to 1.7.1.
- [Changelog](https://github.com/lezer-parser/generator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/lezer-parser/generator/compare/1.7.0...1.7.1)

---
updated-dependencies:
- dependency-name: "@lezer/generator"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 web/ui/module/lezer-promql/package.json | 2 +-
 web/ui/package-lock.json                | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index cbd03ae2b..43a5c44fa 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -30,7 +30,7 @@
     "test": "NODE_OPTIONS=--experimental-vm-modules jest"
   },
   "devDependencies": {
-    "@lezer/generator": "^1.7.0",
+    "@lezer/generator": "^1.7.1",
     "@lezer/highlight": "^1.2.0",
     "@lezer/lr": "^1.4.1"
   },
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 62ac34e43..2028c3402 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -72,7 +72,7 @@
       "version": "0.53.1",
       "license": "Apache-2.0",
       "devDependencies": {
-        "@lezer/generator": "^1.7.0",
+        "@lezer/generator": "^1.7.1",
         "@lezer/highlight": "^1.2.0",
         "@lezer/lr": "^1.4.1"
       },
@@ -3371,9 +3371,9 @@
       "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ=="
     },
     "node_modules/@lezer/generator": {
-      "version": "1.7.0",
-      "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.0.tgz",
-      "integrity": "sha512-IJ16tx3biLKlCXUzcK4v8S10AVa2BSM2rB12rtAL6f1hL2TS/HQQlGCoWRvanlL2J4mCYEEIv9uG7n4kVMkVDA==",
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/@lezer/generator/-/generator-1.7.1.tgz",
+      "integrity": "sha512-MgPJN9Si+ccxzXl3OAmCeZuUKw4XiPl4y664FX/hnnyG9CTqUPq65N3/VGPA2jD23D7QgMTtNqflta+cPN+5mQ==",
       "dev": true,
       "dependencies": {
         "@lezer/common": "^1.1.0",
From 84b819a69f375dc66ea41302a56e44975c0317e3 Mon Sep 17 00:00:00 2001
From: Max Amin
Date: Tue, 30 Jul 2024 11:25:19 -0400
Subject: [PATCH 71/81] feat: add Google cloud roundtripper for remote write
 (#14346)

* feat: Google Auth for remote write

Signed-off-by: Max Amin

---------

Signed-off-by: Max Amin
---
 config/config.go                      | 36 +++++++++++++-----
 config/config_test.go                 |  2 +-
 docs/configuration/configuration.md   | 16 ++++++--
 promql/engine_test.go                 |  3 +-
 rules/manager_test.go                 |  3 +-
 storage/remote/client.go              |  9 +++++
 storage/remote/googleiam/googleiam.go | 54 +++++++++++++++++++++++++++
 storage/remote/write.go               |  1 +
 tsdb/db_test.go                       |  5 ++-
 9 files changed, 110 insertions(+), 19 deletions(-)
 create mode 100644 storage/remote/googleiam/googleiam.go

diff --git a/config/config.go b/config/config.go
index 913983881..8a6216146 100644
--- a/config/config.go
+++ b/config/config.go
@@ -37,6 +37,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/storage/remote/azuread"
+	"github.com/prometheus/prometheus/storage/remote/googleiam"
 )
 
 var (
@@ -1123,6 +1124,7 @@ type RemoteWriteConfig struct {
 	MetadataConfig  MetadataConfig          `yaml:"metadata_config,omitempty"`
 	SigV4Config     *sigv4.SigV4Config      `yaml:"sigv4,omitempty"`
 	AzureADConfig   *azuread.AzureADConfig  `yaml:"azuread,omitempty"`
+	GoogleIAMConfig *googleiam.Config       `yaml:"google_iam,omitempty"`
 }
 
 // SetDirectory joins any relative file paths with dir.
@@ -1160,17 +1162,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		return err
 	}
 
-	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
-		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+	return validateAuthConfigs(c)
+}
 
-	if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured.
+func validateAuthConfigs(c *RemoteWriteConfig) error {
+	var authConfigured []string
+	if c.HTTPClientConfig.BasicAuth != nil {
+		authConfigured = append(authConfigured, "basic_auth")
 	}
-
-	if c.SigV4Config != nil && c.AzureADConfig != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+	if c.HTTPClientConfig.Authorization != nil {
+		authConfigured = append(authConfigured, "authorization")
+	}
+	if c.HTTPClientConfig.OAuth2 != nil {
+		authConfigured = append(authConfigured, "oauth2")
+	}
+	if c.SigV4Config != nil {
+		authConfigured = append(authConfigured, "sigv4")
+	}
+	if c.AzureADConfig != nil {
+		authConfigured = append(authConfigured, "azuread")
+	}
+	if c.GoogleIAMConfig != nil {
+		authConfigured = append(authConfigured, "google_iam")
+	}
+	if len(authConfigured) > 1 {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured)
 	}
-
 	return nil
 }
@@ -1189,7 +1207,7 @@ func validateHeadersForTracing(headers map[string]string) error {
 func validateHeaders(headers map[string]string) error {
 	for header := range headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
+			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter")
 		}
 		if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
 			return fmt.Errorf("%s is a reserved header. It must not be changed", header)
diff --git a/config/config_test.go b/config/config_test.go
index b684fdb50..9b074bef1 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -1826,7 +1826,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "remote_write_authorization_header.bad.yml",
-		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
+		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter`,
 	},
 	{
 		filename: "remote_write_wrong_msg.bad.yml",
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 5aa57b3ba..313a7f2f3 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -3401,8 +3401,8 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
-# Optionally configures AWS's Signature Verification 4 signing process to
-# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
+# Optionally configures AWS's Signature Verification 4 signing process to sign requests.
+# Cannot be set at the same time as basic_auth, authorization, oauth2, azuread or google_iam.
 # To use the default credentials from the AWS SDK, use `sigv4: {}`.
 sigv4:
   # The AWS region. If blank, the region from the default credentials chain
@@ -3655,12 +3655,12 @@ sigv4:
   [ role_arn: <string> ]
 
 # Optional OAuth 2.0 configuration.
-# Cannot be used at the same time as basic_auth, authorization, sigv4, or azuread.
+# Cannot be used at the same time as basic_auth, authorization, sigv4, azuread or google_iam.
 oauth2:
   [ <oauth2> ]
 
 # Optional AzureAD configuration.
-# Cannot be used at the same time as basic_auth, authorization, oauth2, or sigv4.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or google_iam.
 azuread:
   # The Azure Cloud. Options are 'AzurePublic', 'AzureChina', or 'AzureGovernment'.
   [ cloud: <string> | default = AzurePublic ]
@@ -3680,6 +3680,14 @@ azuread:
   [ sdk:
       [ tenant_id: <string> ] ]
 
+# WARNING: Remote write is NOT SUPPORTED by Google Cloud. This configuration is reserved for future use.
+# Optional Google Cloud Monitoring configuration.
+# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
+# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
+google_iam:
+  # Service account key with monitoring write permissions.
+  credentials_file: <file_name>
+
 # Configures the remote write request's TLS settings.
 tls_config:
   [ <tls_config> ]
diff --git a/promql/engine_test.go b/promql/engine_test.go
index 523c0613d..8e618d435 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -26,7 +26,6 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
-	"go.uber.org/goleak"
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -51,7 +50,7 @@ const (
 func TestMain(m *testing.M) {
 	// Enable experimental functions testing
 	parser.EnableExperimentalFunctions = true
-	goleak.VerifyTestMain(m)
+	testutil.TolerantVerifyLeak(m)
 }
 
 func TestQueryConcurrency(t *testing.T) {
diff --git a/rules/manager_test.go b/rules/manager_test.go
index 51239e6c9..9865cbdfe 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -32,7 +32,6 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
-	"go.uber.org/goleak"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/model/labels"
@@ -50,7 +49,7 @@ import (
 )
 
 func TestMain(m *testing.M) {
-	goleak.VerifyTestMain(m)
+	prom_testutil.TolerantVerifyLeak(m)
 }
 
 func TestAlertingRule(t *testing.T) {
diff --git a/storage/remote/client.go b/storage/remote/client.go
index 17caf7be9..11e423b6a 100644
--- a/storage/remote/client.go
+++ b/storage/remote/client.go
@@ -37,6 +37,7 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage/remote/azuread"
+	"github.com/prometheus/prometheus/storage/remote/googleiam"
 )
 
 const maxErrMsgLen = 1024
@@ -131,6 +132,7 @@ type ClientConfig struct {
 	HTTPClientConfig config_util.HTTPClientConfig
 	SigV4Config      *sigv4.SigV4Config
 	AzureADConfig    *azuread.AzureADConfig
+	GoogleIAMConfig  *googleiam.Config
 	Headers          map[string]string
 	RetryOnRateLimit bool
 	WriteProtoMsg    config.RemoteWriteProtoMsg
@@ -192,6 +194,13 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
 		}
 	}
 
+	if conf.GoogleIAMConfig != nil {
+		t, err = googleiam.NewRoundTripper(conf.GoogleIAMConfig, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	writeProtoMsg := config.RemoteWriteProtoMsgV1
 	if conf.WriteProtoMsg != "" {
 		writeProtoMsg = conf.WriteProtoMsg
diff --git a/storage/remote/googleiam/googleiam.go b/storage/remote/googleiam/googleiam.go
new file mode 100644
index 000000000..acf3bd5a6
--- /dev/null
+++ b/storage/remote/googleiam/googleiam.go
@@ -0,0 +1,54 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package googleiam provides an http.RoundTripper that attaches a Google Cloud access token
+// to remote write requests.
+package googleiam
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/option"
+	apihttp "google.golang.org/api/transport/http"
+)
+
+type Config struct {
+	CredentialsFile string `yaml:"credentials_file,omitempty"`
+}
+
+// NewRoundTripper creates a round tripper that adds Google Cloud Monitoring authorization to calls
+// using either a credentials file or the default credentials.
+func NewRoundTripper(cfg *Config, next http.RoundTripper) (http.RoundTripper, error) {
+	if next == nil {
+		next = http.DefaultTransport
+	}
+	const scopes = "https://www.googleapis.com/auth/monitoring.write"
+	ctx := context.Background()
+	opts := []option.ClientOption{
+		option.WithScopes(scopes),
+	}
+	if cfg.CredentialsFile != "" {
+		opts = append(opts, option.WithCredentialsFile(cfg.CredentialsFile))
+	} else {
+		creds, err := google.FindDefaultCredentials(ctx, scopes)
+		if err != nil {
+			return nil, fmt.Errorf("error finding default Google credentials: %w", err)
+		}
+		opts = append(opts, option.WithCredentials(creds))
+	}
+
+	return apihttp.NewTransport(ctx, next, opts...)
+}
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 81902a8f1..3d2f1fdfc 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -176,6 +176,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
 			HTTPClientConfig: rwConf.HTTPClientConfig,
 			SigV4Config:      rwConf.SigV4Config,
 			AzureADConfig:    rwConf.AzureADConfig,
+			GoogleIAMConfig:  rwConf.GoogleIAMConfig,
 			Headers:          rwConf.Headers,
 			RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit,
 		})
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index c0edafe08..c8dad8699 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -63,7 +63,10 @@ func TestMain(m *testing.M) {
 	flag.Parse()
 	defaultIsolationDisabled = !isolationEnabled
 
-	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
+	goleak.VerifyTestMain(m,
+		goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"),
+		goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"),
+		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"))
 }
 
 func openTestDB(t testing.TB, opts *Options, rngs []int64) (db *DB) {
From 15618157321f988e069cdaa955422b24632f5743 Mon Sep 17 00:00:00 2001
From: Callum Styan
Date: Tue, 30 Jul 2024 14:08:28 -0700
Subject: [PATCH 72/81] remote write: increase time threshold for resharding
 (#14450)

Don't reshard if we haven't successfully sent a sample in the last
shardUpdateDuration seconds.

Signed-off-by: Callum Styan
Co-authored-by: kushagra Shukla
---
 storage/remote/queue_manager.go      |  6 +++---
 storage/remote/queue_manager_test.go | 13 ++++++++-----
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 5b59288e6..17ff1850f 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -1109,9 +1109,9 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool {
 	if desiredShards == t.numShards {
 		return false
 	}
-	// We shouldn't reshard if Prometheus hasn't been able to send to the
-	// remote endpoint successfully within some period of time.
-	minSendTimestamp := time.Now().Add(-2 * time.Duration(t.cfg.BatchSendDeadline)).Unix()
+	// We shouldn't reshard if Prometheus hasn't been able to send
+	// since the last time it checked if it should reshard.
+	minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix()
 	lsts := t.lastSendTimestamp.Load()
 	if lsts < minSendTimestamp {
 		level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp)
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 7343184fc..1c06173a5 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -703,32 +703,35 @@ func TestShouldReshard(t *testing.T) {
 		startingShards                           int
 		samplesIn, samplesOut, lastSendTimestamp int64
 		expectedToReshard                        bool
+		sendDeadline                             model.Duration
 	}
 	cases := []testcase{
 		{
-			// Resharding shouldn't take place if the last successful send was > batch send deadline*2 seconds ago.
+			// Resharding shouldn't take place if we haven't successfully sent
+			// since the last shardUpdateDuration, even if the send deadline is very low.
 			startingShards:    10,
 			samplesIn:         1000,
 			samplesOut:        10,
-			lastSendTimestamp: time.Now().Unix() - int64(3*time.Duration(config.DefaultQueueConfig.BatchSendDeadline)/time.Second),
+			lastSendTimestamp: time.Now().Unix() - int64(shardUpdateDuration),
 			expectedToReshard: false,
+			sendDeadline:      model.Duration(100 * time.Millisecond),
 		},
 		{
-			startingShards:    5,
+			startingShards:    10,
 			samplesIn:         1000,
 			samplesOut:        10,
 			lastSendTimestamp: time.Now().Unix(),
 			expectedToReshard: true,
+			sendDeadline:      config.DefaultQueueConfig.BatchSendDeadline,
 		},
 	}
 
 	for _, c := range cases {
-		_, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
+		_, m := newTestClientAndQueueManager(t, time.Duration(c.sendDeadline), config.RemoteWriteProtoMsgV1)
 		m.numShards = c.startingShards
 		m.dataIn.incr(c.samplesIn)
 		m.dataOut.incr(c.samplesOut)
 		m.lastSendTimestamp.Store(c.lastSendTimestamp)
-
 		m.Start()
 		desiredShards := m.calculateDesiredShards()
From 2880ee8e46e2c49e5155523b30b7878d7cc65ae8 Mon Sep 17 00:00:00 2001
From: Matthieu MOREL
Date: Thu, 25 Jan 2024 07:29:48 +0100
Subject: [PATCH 73/81] chore: provide OSSF security insight

Signed-off-by: Matthieu MOREL
---
 README.md             |  3 ++-
 SECURITY-INSIGHTS.yml | 48 +++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+), 1 deletion(-)
 create mode 100644 SECURITY-INSIGHTS.yml

diff --git a/README.md b/README.md
index cd14ed2ec..df974e109 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,10 @@ examples and guides.

 [![Docker Pulls](https://img.shields.io/docker/pulls/prom/prometheus.svg?maxAge=604800)][hub]
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/prometheus)](https://goreportcard.com/report/github.com/prometheus/prometheus)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
+[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/prometheus/badge)](https://clomonitor.io/projects/cncf/prometheus)
 [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)
diff --git a/SECURITY-INSIGHTS.yml b/SECURITY-INSIGHTS.yml
new file mode 100644
index 000000000..009b35621
--- /dev/null
+++ b/SECURITY-INSIGHTS.yml
@@ -0,0 +1,48 @@
+header:
+  schema-version: '1.0.0'
+  expiration-date: '2025-07-30T01:00:00.000Z'
+  last-updated: '2024-07-30'
+  last-reviewed: '2024-07-30'
+  project-url: https://github.com/prometheus/prometheus
+  changelog: https://github.com/prometheus/prometheus/blob/main/CHANGELOG.md
+  license: https://github.com/prometheus/prometheus/blob/main/LICENSE
+project-lifecycle:
+  status: active
+  bug-fixes-only: false
+  core-maintainers:
+  - https://github.com/prometheus/prometheus/blob/main/MAINTAINERS.md
+contribution-policy:
+  accepts-pull-requests: true
+  accepts-automated-pull-requests: true
+dependencies:
+  third-party-packages: true
+  dependencies-lists:
+  - https://github.com/prometheus/prometheus/blob/main/go.mod
+  - https://github.com/prometheus/prometheus/blob/main/web/ui/package.json
+  env-dependencies-policy:
+    policy-url: https://github.com/prometheus/prometheus/blob/main/CONTRIBUTING.md#dependency-management
+distribution-points:
+- https://github.com/prometheus/prometheus/releases
+documentation:
+- https://prometheus.io/docs/introduction/overview/
+security-contacts:
+- type: email
+  value: prometheus-team@googlegroups.com
+security-testing:
+- tool-type: sca
+  tool-name: Dependabot
+  tool-version: latest
+  integration:
+    ad-hoc: false
+    ci: true
+    before-release: true
+- tool-type: sast
+  tool-name: CodeQL
+  tool-version: latest
+  integration:
+    ad-hoc: false
+    ci: true
+    before-release: true
+vulnerability-reporting:
+  accepts-vulnerability-reports: true
+  security-policy: https://github.com/prometheus/prometheus/security/policy
From 7fab72a280f139170a14e6f6a21f6396fa02899e Mon Sep 17 00:00:00 2001
From: Charles Korn
Date: Wed, 31 Jul 2024 17:53:05 +1000
Subject: [PATCH 74/81] promqltest: add support for setting counter reset hint
 on histogram samples (#14537)

* promqltest: add support for setting counter reset hint on histogram samples

Signed-off-by: Charles Korn
---
 docs/configuration/unit_testing_rules.md |   4 +-
 promql/parser/generated_parser.y         |  17 +-
 promql/parser/generated_parser.y.go      | 797 ++++++++++++-----------
 promql/parser/lex.go                     |  33 +-
 promql/parser/parse.go                   |  22 +
 promql/parser/parse_test.go             |  75 ++-
 6 files changed, 531 insertions(+), 417 deletions(-)

diff --git a/docs/configuration/unit_testing_rules.md b/docs/configuration/unit_testing_rules.md
index 163fcb91f..7fc676a25 100644
--- a/docs/configuration/unit_testing_rules.md
+++ b/docs/configuration/unit_testing_rules.md
@@ -92,7 +92,7 @@ series:
 #
 # Native histogram notation:
 # Native histograms can be used instead of floating point numbers using the following notation:
-# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}
+# {{schema:1 sum:-0.3 count:3.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5 counter_reset_hint:gauge}}
 # Native histograms support the same expanding notation as floating point numbers, i.e. 'axn', 'a+bxn' and 'a-bxn'.
 # All properties are optional and default to 0. The order is not important. The following properties are supported:
 # - schema (int):
@@ -119,6 +119,8 @@ series:
 #     Observation counts in negative buckets. Each represents an absolute count.
 # - n_offset (int):
 #     The starting index of the first entry in the negative buckets.
+# - counter_reset_hint (one of 'unknown', 'reset', 'not_reset' or 'gauge'):
+#     The counter reset hint associated with this histogram. Defaults to 'unknown' if not set.
 values:
 ```
diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y
index b99e67424..b8e6aa373 100644
--- a/promql/parser/generated_parser.y
+++ b/promql/parser/generated_parser.y
@@ -84,6 +84,7 @@
 NEGATIVE_BUCKETS_DESC
 ZERO_BUCKET_DESC
 ZERO_BUCKET_WIDTH_DESC
 CUSTOM_VALUES_DESC
+COUNTER_RESET_HINT_DESC
 %token histogramDescEnd
 
 // Operators.
@@ -149,6 +150,14 @@
 START
 END
 %token preprocessorEnd
 
+// Counter reset hints.
+%token counterResetHintsStart
+%token <item>
+UNKNOWN_COUNTER_RESET
+COUNTER_RESET
+NOT_COUNTER_RESET
+GAUGE_TYPE
+%token counterResetHintsEnd
 
 // Start symbols for the generated parser.
 %token startSymbolsStart
@@ -163,7 +172,7 @@
 START_METRIC_SELECTOR
 
 // Type definitions for grammar rules.
 %type <matchers> label_match_list
 %type <matcher> label_matcher
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
 %type <labels> label_set metric
 %type <lblList> label_set_list
 %type