2015-01-21 19:07:45 +00:00
|
|
|
// Copyright 2013 The Prometheus Authors
|
2013-01-07 22:24:26 +00:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2015-03-30 17:43:19 +00:00
|
|
|
package rules
|
2013-01-07 22:24:26 +00:00
|
|
|
|
|
|
|
import (
|
2017-10-25 04:21:42 +00:00
|
|
|
"context"
|
2022-06-17 07:54:25 +00:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
2024-09-10 01:41:53 +00:00
|
|
|
"log/slog"
|
2015-06-30 12:38:01 +00:00
|
|
|
"net/url"
|
2024-01-15 16:24:46 +00:00
|
|
|
"slices"
|
2023-09-21 20:53:51 +00:00
|
|
|
"strings"
|
2013-04-17 12:42:15 +00:00
|
|
|
"sync"
|
2013-01-07 22:24:26 +00:00
|
|
|
"time"
|
|
|
|
|
2014-06-18 17:43:15 +00:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2024-09-10 01:41:53 +00:00
|
|
|
"github.com/prometheus/common/promslog"
|
2023-10-25 20:31:26 +00:00
|
|
|
"golang.org/x/sync/semaphore"
|
2019-03-25 23:01:12 +00:00
|
|
|
|
2021-11-08 14:23:17 +00:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
|
|
|
"github.com/prometheus/prometheus/model/rulefmt"
|
2022-10-07 14:58:17 +00:00
|
|
|
"github.com/prometheus/prometheus/notifier"
|
2015-03-30 17:43:19 +00:00
|
|
|
"github.com/prometheus/prometheus/promql"
|
2020-02-03 18:06:39 +00:00
|
|
|
"github.com/prometheus/prometheus/promql/parser"
|
2015-03-15 02:36:15 +00:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2022-10-07 14:58:17 +00:00
|
|
|
"github.com/prometheus/prometheus/util/strutil"
|
2013-06-25 12:02:27 +00:00
|
|
|
)
|
2013-01-07 22:24:26 +00:00
|
|
|
|
2017-11-23 12:04:54 +00:00
|
|
|
// QueryFunc processes PromQL queries.
// It evaluates the expression q at timestamp t and returns the resulting
// sample vector. See EngineQueryFunc for the engine-backed implementation.
type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector, error)
|
|
|
|
|
|
|
|
// EngineQueryFunc returns a new query function that executes instant queries against
|
|
|
|
// the given engine.
|
2018-07-18 03:54:33 +00:00
|
|
|
// It converts scalar into vector results.
|
2024-03-06 03:54:33 +00:00
|
|
|
func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable) QueryFunc {
|
2017-11-23 12:04:54 +00:00
|
|
|
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
|
2023-04-18 04:32:38 +00:00
|
|
|
q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
|
2017-11-23 12:04:54 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
res := q.Exec(ctx)
|
|
|
|
if res.Err != nil {
|
|
|
|
return nil, res.Err
|
|
|
|
}
|
|
|
|
switch v := res.Value.(type) {
|
|
|
|
case promql.Vector:
|
|
|
|
return v, nil
|
|
|
|
case promql.Scalar:
|
|
|
|
return promql.Vector{promql.Sample{
|
promql: Separate `Point` into `FPoint` and `HPoint`
In other words: Instead of having a “polymorphous” `Point` that can
either contain a float value or a histogram value, use an `FPoint` for
floats and an `HPoint` for histograms.
This seemingly small change has a _lot_ of repercussions throughout
the codebase.
The idea here is to avoid the increase in size of `Point` arrays that
happened after native histograms had been added.
The higher-level data structures (`Sample`, `Series`, etc.) are still
“polymorphous”. The same idea could be applied to them, but at each
step the trade-offs needed to be evaluated.
The idea with this change is to do the minimum necessary to get back
to pre-histogram performance for functions that do not touch
histograms. Here are comparisons for the `changes` function. The test
data doesn't include histograms yet. Ideally, there would be no change
in the benchmark result at all.
First runtime v2.39 compared to directly prior to this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 542µs ± 1% +38.58% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 617µs ± 2% +36.48% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.36ms ± 2% +21.58% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 8.94ms ± 1% +14.21% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.30ms ± 1% +10.67% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.10ms ± 1% +11.82% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 11.8ms ± 1% +12.50% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 87.4ms ± 1% +12.63% (p=0.000 n=9+9)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 32.8ms ± 1% +8.01% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.6ms ± 2% +9.64% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 117ms ± 1% +11.69% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 876ms ± 1% +11.83% (p=0.000 n=9+10)
```
And then runtime v2.39 compared to after this commit:
```
name old time/op new time/op delta
RangeQuery/expr=changes(a_one[1d]),steps=1-16 391µs ± 2% 547µs ± 1% +39.84% (p=0.000 n=9+8)
RangeQuery/expr=changes(a_one[1d]),steps=10-16 452µs ± 2% 616µs ± 2% +36.15% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_one[1d]),steps=100-16 1.12ms ± 1% 1.26ms ± 1% +12.20% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_one[1d]),steps=1000-16 7.83ms ± 1% 7.95ms ± 1% +1.59% (p=0.000 n=10+8)
RangeQuery/expr=changes(a_ten[1d]),steps=1-16 2.98ms ± 0% 3.38ms ± 2% +13.49% (p=0.000 n=9+10)
RangeQuery/expr=changes(a_ten[1d]),steps=10-16 3.66ms ± 1% 4.02ms ± 1% +9.80% (p=0.000 n=10+9)
RangeQuery/expr=changes(a_ten[1d]),steps=100-16 10.5ms ± 0% 10.8ms ± 1% +3.08% (p=0.000 n=8+10)
RangeQuery/expr=changes(a_ten[1d]),steps=1000-16 77.6ms ± 1% 78.1ms ± 1% +0.58% (p=0.035 n=9+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1-16 30.4ms ± 2% 33.5ms ± 4% +10.18% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=10-16 37.1ms ± 2% 40.0ms ± 1% +7.98% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=100-16 105ms ± 1% 107ms ± 1% +1.92% (p=0.000 n=10+10)
RangeQuery/expr=changes(a_hundred[1d]),steps=1000-16 783ms ± 3% 775ms ± 1% -1.02% (p=0.019 n=9+9)
```
In summary, the runtime doesn't really improve with this change for
queries with just a few steps. For queries with many steps, this
commit essentially reinstates the old performance. This is good
because the many-step queries are the one that matter most (longest
absolute runtime).
In terms of allocations, though, this commit doesn't make a dent at
all (numbers not shown). The reason is that most of the allocations
happen in the sampleRingIterator (in the storage package), which has
to be addressed in a separate commit.
Signed-off-by: beorn7 <beorn@grafana.com>
2022-10-28 14:58:40 +00:00
|
|
|
T: v.T,
|
|
|
|
F: v.V,
|
2017-11-23 12:04:54 +00:00
|
|
|
Metric: labels.Labels{},
|
|
|
|
}}, nil
|
|
|
|
default:
|
2019-03-25 23:01:12 +00:00
|
|
|
return nil, errors.New("rule result is not a vector or scalar")
|
2017-11-23 12:04:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-04 18:21:13 +00:00
|
|
|
// DefaultEvalIterationFunc is the default implementation of
|
|
|
|
// GroupEvalIterationFunc that is periodically invoked to evaluate the rules
|
|
|
|
// in a group at a given point in time and updates Group state and metrics
|
|
|
|
// accordingly. Custom GroupEvalIterationFunc implementations are recommended
|
|
|
|
// to invoke this function as well, to ensure correct Group state and metrics
|
|
|
|
// are maintained.
|
|
|
|
func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.Time) {
|
|
|
|
g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc()
|
|
|
|
|
|
|
|
start := time.Now()
|
|
|
|
g.Eval(ctx, evalTimestamp)
|
|
|
|
timeSinceStart := time.Since(start)
|
|
|
|
|
|
|
|
g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
|
|
|
|
g.setEvaluationTime(timeSinceStart)
|
|
|
|
g.setLastEvaluation(start)
|
|
|
|
g.setLastEvalTimestamp(evalTimestamp)
|
2022-03-29 00:16:46 +00:00
|
|
|
}
|
|
|
|
|
2015-12-14 16:40:40 +00:00
|
|
|
// The Manager manages recording and alerting rules.
type Manager struct {
	opts   *ManagerOptions
	groups map[string]*Group // active groups, keyed by GroupKey(file, name)
	mtx    sync.RWMutex      // guards groups and restored
	// block is closed by start(); group goroutines spawned in Update wait
	// on it before evaluating, to avoid querying a bootstrapping storage.
	block chan struct{}
	// done is closed by Stop(); once closed, Update becomes a no-op.
	done     chan struct{}
	restored bool // set to true after the first successful Update

	logger *slog.Logger
}
|
2013-07-30 15:18:07 +00:00
|
|
|
|
2017-11-24 07:59:05 +00:00
|
|
|
// NotifyFunc sends notifications about a set of alerts generated by the given expression.
// See SendAlerts for the default implementation backed by a notifier Sender.
type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
|
2017-11-24 07:59:05 +00:00
|
|
|
|
2015-12-14 16:40:40 +00:00
|
|
|
// ManagerOptions bundles options for the Manager.
type ManagerOptions struct {
	ExternalURL *url.URL
	QueryFunc   QueryFunc
	NotifyFunc  NotifyFunc
	// Context is passed to each group's run loop (see Manager.Update).
	Context    context.Context
	Appendable storage.Appendable
	Queryable  storage.Queryable
	// Logger defaults to a no-op logger when nil (see NewManager).
	Logger *slog.Logger
	// Registerer is used to build group metrics when Metrics is nil.
	Registerer prometheus.Registerer
	// OutageTolerance, ForGracePeriod and ResendDelay tune alert handling;
	// presumably consumed by Group evaluation — not visible in this file,
	// confirm against the Group implementation.
	OutageTolerance time.Duration
	ForGracePeriod  time.Duration
	ResendDelay     time.Duration
	// GroupLoader defaults to FileLoader when nil (see NewManager).
	GroupLoader            GroupLoader
	DefaultRuleQueryOffset func() time.Duration
	// MaxConcurrentEvals bounds concurrent rule evaluations when
	// ConcurrentEvalsEnabled is true (see NewManager).
	MaxConcurrentEvals     int64
	ConcurrentEvalsEnabled bool
	// RuleConcurrencyController defaults based on ConcurrentEvalsEnabled:
	// semaphore-backed when enabled, strictly sequential otherwise.
	RuleConcurrencyController RuleConcurrencyController
	// RuleDependencyController defaults to ruleDependencyController{} and is
	// invoked from LoadGroups to analyse inter-rule dependencies.
	RuleDependencyController RuleDependencyController

	// Metrics, if nil, is created via NewGroupMetrics(Registerer).
	Metrics *Metrics
}
|
2015-03-15 02:36:15 +00:00
|
|
|
|
2015-12-14 16:40:40 +00:00
|
|
|
// NewManager returns an implementation of Manager, ready to be started
|
|
|
|
// by calling the Run method.
|
|
|
|
func NewManager(o *ManagerOptions) *Manager {
|
2019-01-03 12:07:06 +00:00
|
|
|
if o.Metrics == nil {
|
|
|
|
o.Metrics = NewGroupMetrics(o.Registerer)
|
2018-12-28 10:20:29 +00:00
|
|
|
}
|
|
|
|
|
2020-07-22 14:19:34 +00:00
|
|
|
if o.GroupLoader == nil {
|
|
|
|
o.GroupLoader = FileLoader{}
|
|
|
|
}
|
|
|
|
|
2023-11-02 18:33:06 +00:00
|
|
|
if o.RuleConcurrencyController == nil {
|
2024-01-26 18:53:44 +00:00
|
|
|
if o.ConcurrentEvalsEnabled {
|
|
|
|
o.RuleConcurrencyController = newRuleConcurrencyController(o.MaxConcurrentEvals)
|
|
|
|
} else {
|
|
|
|
o.RuleConcurrencyController = sequentialRuleEvalController{}
|
|
|
|
}
|
2023-11-02 18:33:06 +00:00
|
|
|
}
|
2023-10-25 20:31:26 +00:00
|
|
|
|
2024-02-02 09:06:37 +00:00
|
|
|
if o.RuleDependencyController == nil {
|
|
|
|
o.RuleDependencyController = ruleDependencyController{}
|
|
|
|
}
|
|
|
|
|
2024-09-10 01:41:53 +00:00
|
|
|
if o.Logger == nil {
|
|
|
|
o.Logger = promslog.NewNopLogger()
|
|
|
|
}
|
|
|
|
|
2017-11-30 14:36:34 +00:00
|
|
|
m := &Manager{
|
2015-12-14 16:40:40 +00:00
|
|
|
groups: map[string]*Group{},
|
|
|
|
opts: o,
|
2016-01-08 16:51:22 +00:00
|
|
|
block: make(chan struct{}),
|
2020-02-12 15:22:18 +00:00
|
|
|
done: make(chan struct{}),
|
2017-06-16 10:22:44 +00:00
|
|
|
logger: o.Logger,
|
2013-01-07 22:24:26 +00:00
|
|
|
}
|
2018-12-28 10:20:29 +00:00
|
|
|
|
2017-11-30 14:36:34 +00:00
|
|
|
return m
|
2013-01-07 22:24:26 +00:00
|
|
|
}
|
|
|
|
|
2020-07-21 22:13:24 +00:00
|
|
|
// Run starts processing of the rule manager. It is blocking.
func (m *Manager) Run() {
	m.logger.Info("Starting rule manager...")
	m.start()
	// Block until Stop() closes the done channel.
	<-m.done
}
|
|
|
|
|
|
|
|
// start releases all rule group goroutines that are blocked in Update
// waiting on m.block for the manager to be told to run.
func (m *Manager) start() {
	close(m.block)
}
|
|
|
|
|
2015-12-14 16:40:40 +00:00
|
|
|
// Stop the rule manager's rule evaluation cycles.
func (m *Manager) Stop() {
	// Hold the write lock for the entire shutdown so Stop cannot race with a
	// concurrent Update/ApplyConfig (a reload immediately followed by a stop
	// previously caused a "close of closed channel" panic).
	m.mtx.Lock()
	defer m.mtx.Unlock()

	m.logger.Info("Stopping rule manager...")

	for _, eg := range m.groups {
		eg.stop()
	}

	// Shut down the groups waiting multiple evaluation intervals to write
	// staleness markers.
	close(m.done)

	m.logger.Info("Rule manager stopped")
}
|
|
|
|
|
2017-11-23 14:48:14 +00:00
|
|
|
// Update the rule manager's state as the config requires. If
|
2016-07-11 14:24:54 +00:00
|
|
|
// loading the new rules failed the old rule set is restored.
|
2024-06-28 23:43:22 +00:00
|
|
|
// This method will no-op in case the manager is already stopped.
|
2023-04-04 18:21:13 +00:00
|
|
|
func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc) error {
|
2015-12-14 16:40:40 +00:00
|
|
|
m.mtx.Lock()
|
|
|
|
defer m.mtx.Unlock()
|
2015-05-12 14:52:56 +00:00
|
|
|
|
2024-06-28 22:50:54 +00:00
|
|
|
// We cannot update a stopped manager
|
|
|
|
select {
|
|
|
|
case <-m.done:
|
|
|
|
return nil
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
2023-04-04 18:21:13 +00:00
|
|
|
groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...)
|
2022-03-29 00:16:46 +00:00
|
|
|
|
2017-06-16 11:14:33 +00:00
|
|
|
if errs != nil {
|
|
|
|
for _, e := range errs {
|
2024-09-10 01:41:53 +00:00
|
|
|
m.logger.Error("loading groups failed", "err", e)
|
2017-06-16 11:14:33 +00:00
|
|
|
}
|
|
|
|
return errors.New("error loading rules, previous rule set restored")
|
2015-05-12 14:52:56 +00:00
|
|
|
}
|
2018-08-02 10:18:24 +00:00
|
|
|
m.restored = true
|
2015-06-23 10:07:53 +00:00
|
|
|
|
2015-12-14 16:40:40 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, newg := range groups {
|
2019-12-19 10:41:11 +00:00
|
|
|
// If there is an old group with the same identifier,
|
|
|
|
// check if new group equals with the old group, if yes then skip it.
|
|
|
|
// If not equals, stop it and wait for it to finish the current iteration.
|
|
|
|
// Then copy it into the new group.
|
2020-09-13 15:07:59 +00:00
|
|
|
gn := GroupKey(newg.file, newg.name)
|
2017-11-01 11:58:00 +00:00
|
|
|
oldg, ok := m.groups[gn]
|
|
|
|
delete(m.groups, gn)
|
2015-12-14 16:40:40 +00:00
|
|
|
|
2019-12-19 10:41:11 +00:00
|
|
|
if ok && oldg.Equals(newg) {
|
|
|
|
groups[gn] = oldg
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Add(1)
|
2020-03-01 18:32:14 +00:00
|
|
|
go func(newg *Group) {
|
2015-12-14 16:40:40 +00:00
|
|
|
if ok {
|
|
|
|
oldg.stop()
|
2018-07-18 13:14:38 +00:00
|
|
|
newg.CopyState(oldg)
|
2015-12-14 16:40:40 +00:00
|
|
|
}
|
|
|
|
wg.Done()
|
2020-09-21 10:29:03 +00:00
|
|
|
// Wait with starting evaluation until the rule manager
|
|
|
|
// is told to run. This is necessary to avoid running
|
|
|
|
// queries against a bootstrapping storage.
|
|
|
|
<-m.block
|
|
|
|
newg.run(m.opts.Context)
|
2020-03-01 18:32:14 +00:00
|
|
|
}(newg)
|
2015-12-14 16:40:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stop remaining old groups.
|
2020-02-12 15:22:18 +00:00
|
|
|
wg.Add(len(m.groups))
|
2020-01-27 12:41:32 +00:00
|
|
|
for n, oldg := range m.groups {
|
2020-02-12 15:22:18 +00:00
|
|
|
go func(n string, g *Group) {
|
2020-04-18 12:32:18 +00:00
|
|
|
g.markStale = true
|
|
|
|
g.stop()
|
2020-02-12 15:22:18 +00:00
|
|
|
if m := g.metrics; m != nil {
|
2021-04-30 17:25:34 +00:00
|
|
|
m.IterationsMissed.DeleteLabelValues(n)
|
|
|
|
m.IterationsScheduled.DeleteLabelValues(n)
|
|
|
|
m.EvalTotal.DeleteLabelValues(n)
|
|
|
|
m.EvalFailures.DeleteLabelValues(n)
|
|
|
|
m.GroupInterval.DeleteLabelValues(n)
|
|
|
|
m.GroupLastEvalTime.DeleteLabelValues(n)
|
|
|
|
m.GroupLastDuration.DeleteLabelValues(n)
|
|
|
|
m.GroupRules.DeleteLabelValues(n)
|
|
|
|
m.GroupSamples.DeleteLabelValues((n))
|
2020-02-12 15:22:18 +00:00
|
|
|
}
|
|
|
|
wg.Done()
|
|
|
|
}(n, oldg)
|
2015-12-14 16:40:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
m.groups = groups
|
|
|
|
|
2016-07-11 14:24:54 +00:00
|
|
|
return nil
|
2015-05-12 14:52:56 +00:00
|
|
|
}
|
|
|
|
|
2020-07-22 14:19:34 +00:00
|
|
|
// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them.
type GroupLoader interface {
	// Load reads and parses rule groups from the given identifier
	// (a file path for the default FileLoader), returning any number
	// of errors encountered while doing so.
	Load(identifier string) (*rulefmt.RuleGroups, []error)
	// Parse parses a single rule expression into an AST.
	Parse(query string) (parser.Expr, error)
}
|
|
|
|
|
|
|
|
// FileLoader is the default GroupLoader implementation. It defers to rulefmt.ParseFile
// and parser.ParseExpr.
type FileLoader struct{}

// Load parses the rule file at the given path.
func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
	return rulefmt.ParseFile(identifier)
}

// Parse parses a PromQL expression string.
func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
|
|
|
|
|
2018-09-25 16:06:26 +00:00
|
|
|
// LoadGroups reads groups from a list of files.
// It returns the parsed groups keyed by GroupKey(file, name), or the
// loader/parse errors encountered. It does not modify the manager's state.
func (m *Manager) LoadGroups(
	interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string,
) (map[string]*Group, []error) {
	groups := make(map[string]*Group)

	// Alert state restoration is only attempted before the first
	// successful Update (m.restored is set there).
	shouldRestore := !m.restored

	for _, fn := range filenames {
		rgs, errs := m.opts.GroupLoader.Load(fn)
		if errs != nil {
			return nil, errs
		}

		for _, rg := range rgs.Groups {
			// A group may override the manager-wide evaluation interval.
			itv := interval
			if rg.Interval != 0 {
				itv = time.Duration(rg.Interval)
			}

			rules := make([]Rule, 0, len(rg.Rules))
			for _, r := range rg.Rules {
				expr, err := m.opts.GroupLoader.Parse(r.Expr.Value)
				if err != nil {
					return nil, []error{fmt.Errorf("%s: %w", fn, err)}
				}

				// Rule labels override group labels on conflict.
				mLabels := FromMaps(rg.Labels, r.Labels)

				// A non-empty Alert field marks an alerting rule;
				// otherwise it is a recording rule.
				if r.Alert.Value != "" {
					rules = append(rules, NewAlertingRule(
						r.Alert.Value,
						expr,
						time.Duration(r.For),
						time.Duration(r.KeepFiringFor),
						mLabels,
						labels.FromMap(r.Annotations),
						externalLabels,
						externalURL,
						m.restored,
						m.logger.With("alert", r.Alert),
					))
					continue
				}
				rules = append(rules, NewRecordingRule(
					r.Record.Value,
					expr,
					mLabels,
				))
			}

			// Check dependencies between rules and store it on the Rule itself.
			m.opts.RuleDependencyController.AnalyseRules(rules)

			groups[GroupKey(fn, rg.Name)] = NewGroup(GroupOptions{
				Name:              rg.Name,
				File:              fn,
				Interval:          itv,
				Limit:             rg.Limit,
				Rules:             rules,
				ShouldRestore:     shouldRestore,
				Opts:              m.opts,
				QueryOffset:       (*time.Duration)(rg.QueryOffset),
				done:              m.done,
				EvalIterationFunc: groupEvalIterationFunc,
			})
		}
	}

	return groups, nil
}
|
2013-06-11 09:00:55 +00:00
|
|
|
|
2017-06-14 10:39:14 +00:00
|
|
|
// RuleGroups returns the list of manager's rule groups.
|
|
|
|
func (m *Manager) RuleGroups() []*Group {
|
|
|
|
m.mtx.RLock()
|
|
|
|
defer m.mtx.RUnlock()
|
|
|
|
|
|
|
|
rgs := make([]*Group, 0, len(m.groups))
|
|
|
|
for _, g := range m.groups {
|
|
|
|
rgs = append(rgs, g)
|
|
|
|
}
|
|
|
|
|
2023-09-21 20:53:51 +00:00
|
|
|
slices.SortFunc(rgs, func(a, b *Group) int {
|
|
|
|
fileCompare := strings.Compare(a.file, b.file)
|
|
|
|
|
|
|
|
// If its 0, then the file names are the same.
|
|
|
|
// Lets look at the group names in that case.
|
|
|
|
if fileCompare != 0 {
|
|
|
|
return fileCompare
|
2019-02-23 08:51:44 +00:00
|
|
|
}
|
2023-09-21 20:53:51 +00:00
|
|
|
return strings.Compare(a.name, b.name)
|
2017-06-14 10:39:14 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
return rgs
|
|
|
|
}
|
|
|
|
|
2015-04-29 09:08:56 +00:00
|
|
|
// Rules returns the list of the manager's rules.
|
2024-07-10 12:18:29 +00:00
|
|
|
func (m *Manager) Rules(matcherSets ...[]*labels.Matcher) []Rule {
|
2015-12-14 16:40:40 +00:00
|
|
|
m.mtx.RLock()
|
|
|
|
defer m.mtx.RUnlock()
|
|
|
|
|
|
|
|
var rules []Rule
|
|
|
|
for _, g := range m.groups {
|
2024-07-10 12:18:29 +00:00
|
|
|
rules = append(rules, g.Rules(matcherSets...)...)
|
2015-12-14 16:40:40 +00:00
|
|
|
}
|
2013-06-11 09:00:55 +00:00
|
|
|
|
|
|
|
return rules
|
|
|
|
}
|
2013-06-13 14:10:05 +00:00
|
|
|
|
2015-04-29 09:08:56 +00:00
|
|
|
// AlertingRules returns the list of the manager's alerting rules.
|
2015-04-29 08:26:49 +00:00
|
|
|
func (m *Manager) AlertingRules() []*AlertingRule {
|
2015-03-30 17:43:19 +00:00
|
|
|
alerts := []*AlertingRule{}
|
2015-12-14 16:40:40 +00:00
|
|
|
for _, rule := range m.Rules() {
|
2015-03-30 17:43:19 +00:00
|
|
|
if alertingRule, ok := rule.(*AlertingRule); ok {
|
2013-06-13 14:10:05 +00:00
|
|
|
alerts = append(alerts, alertingRule)
|
|
|
|
}
|
|
|
|
}
|
2019-05-14 21:14:27 +00:00
|
|
|
|
2013-06-13 14:10:05 +00:00
|
|
|
return alerts
|
|
|
|
}
|
2022-10-07 14:58:17 +00:00
|
|
|
|
|
|
|
// Sender is the subset of a notifier needed to dispatch alerts
// (see SendAlerts).
type Sender interface {
	// Send queues the given alerts for delivery.
	Send(alerts ...*notifier.Alert)
}
|
|
|
|
|
|
|
|
// SendAlerts implements the rules.NotifyFunc for a Notifier.
|
|
|
|
func SendAlerts(s Sender, externalURL string) NotifyFunc {
|
|
|
|
return func(ctx context.Context, expr string, alerts ...*Alert) {
|
|
|
|
var res []*notifier.Alert
|
|
|
|
|
|
|
|
for _, alert := range alerts {
|
|
|
|
a := ¬ifier.Alert{
|
|
|
|
StartsAt: alert.FiredAt,
|
|
|
|
Labels: alert.Labels,
|
|
|
|
Annotations: alert.Annotations,
|
|
|
|
GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
|
|
|
|
}
|
|
|
|
if !alert.ResolvedAt.IsZero() {
|
|
|
|
a.EndsAt = alert.ResolvedAt
|
|
|
|
} else {
|
|
|
|
a.EndsAt = alert.ValidUntil
|
|
|
|
}
|
|
|
|
res = append(res, a)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(alerts) > 0 {
|
|
|
|
s.Send(res...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-10-25 21:05:25 +00:00
|
|
|
|
2024-02-02 09:06:37 +00:00
|
|
|
// RuleDependencyController controls whether a set of rules have dependencies between each other.
// Its analysis results drive the concurrency decisions made by
// RuleConcurrencyController implementations.
type RuleDependencyController interface {
	// AnalyseRules analyses dependencies between the input rules. For each rule that it's guaranteed
	// not having any dependants and/or dependency, this function should call Rule.SetNoDependentRules(true)
	// and/or Rule.SetNoDependencyRules(true).
	AnalyseRules(rules []Rule)
}
|
2023-11-02 18:33:06 +00:00
|
|
|
|
2024-02-02 09:06:37 +00:00
|
|
|
// ruleDependencyController is the default RuleDependencyController
// implementation.
type ruleDependencyController struct{}

// AnalyseRules implements RuleDependencyController.
func (c ruleDependencyController) AnalyseRules(rules []Rule) {
	// A rule with neither dependents nor dependencies is safe to evaluate
	// concurrently (see concurrentRuleEvalController.Allow).
	depMap := buildDependencyMap(rules)
	for _, r := range rules {
		r.SetNoDependentRules(depMap.dependents(r) == 0)
		r.SetNoDependencyRules(depMap.dependencies(r) == 0)
	}
}
|
|
|
|
|
|
|
|
// RuleConcurrencyController controls concurrency for rules that are safe to be evaluated concurrently.
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
type RuleConcurrencyController interface {
	// Allow determines if the given rule is allowed to be evaluated concurrently.
	// If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done.
	// It is important that both *Group and Rule are not retained and only be used for the duration of the call.
	Allow(ctx context.Context, group *Group, rule Rule) bool

	// Done releases a concurrent evaluation slot.
	// It must only be called after a corresponding successful Allow.
	Done(ctx context.Context)
}
|
|
|
|
|
2023-10-28 09:44:20 +00:00
|
|
|
// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
type concurrentRuleEvalController struct {
	// sema caps the number of rules evaluated in parallel; its weight is
	// the MaxConcurrentEvals option (see newRuleConcurrencyController).
	sema *semaphore.Weighted
}
|
|
|
|
|
2024-01-26 18:53:44 +00:00
|
|
|
func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
|
|
|
|
return &concurrentRuleEvalController{
|
2024-07-22 13:11:18 +00:00
|
|
|
sema: semaphore.NewWeighted(maxConcurrency),
|
2024-01-26 18:53:44 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-07-22 13:11:18 +00:00
|
|
|
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
|
|
|
|
// To allow a rule to be executed concurrently, we need 3 conditions:
|
|
|
|
// 1. The rule must not have any rules that depend on it.
|
|
|
|
// 2. The rule itself must not depend on any other rules.
|
|
|
|
// 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
|
|
|
|
if rule.NoDependentRules() && rule.NoDependencyRules() {
|
|
|
|
return c.sema.TryAcquire(1)
|
2023-11-02 18:33:06 +00:00
|
|
|
}
|
|
|
|
|
2024-07-22 13:11:18 +00:00
|
|
|
return false
|
2023-10-25 21:05:25 +00:00
|
|
|
}
|
|
|
|
|
2024-07-22 13:11:18 +00:00
|
|
|
// Done releases the semaphore slot acquired by a successful Allow call.
func (c *concurrentRuleEvalController) Done(_ context.Context) {
	c.sema.Release(1)
}
|
2023-11-02 18:33:06 +00:00
|
|
|
|
2024-01-26 18:53:44 +00:00
|
|
|
// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}

// Allow always returns false: no rule is ever evaluated concurrently.
func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
	return false
}

// Done is a no-op since Allow never grants a slot.
func (c sequentialRuleEvalController) Done(_ context.Context) {}
|
2022-10-18 18:43:32 +00:00
|
|
|
|
|
|
|
// FromMaps returns new sorted Labels from the given maps, overriding each other in order.
|
|
|
|
func FromMaps(maps ...map[string]string) labels.Labels {
|
|
|
|
mLables := make(map[string]string)
|
|
|
|
|
|
|
|
for _, m := range maps {
|
|
|
|
for k, v := range m {
|
|
|
|
mLables[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return labels.FromMap(mLables)
|
|
|
|
}
|