// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rules

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/common/model"
	"go.uber.org/atomic"
	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/rulefmt"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/template"
)

const (
	// alertMetricName is the metric name for synthetic alert timeseries.
	alertMetricName = "ALERTS"
	// alertForStateMetricName is the metric name for the 'for' state of an alert.
	alertForStateMetricName = "ALERTS_FOR_STATE"

	// alertStateLabel is the label name indicating the state of an alert.
	alertStateLabel = "alertstate"
)

// AlertState denotes the state of an active alert.
type AlertState int

const (
	// StateInactive is the state of an alert that is neither firing nor pending.
	StateInactive AlertState = iota
	// StatePending is the state of an alert that has been active for less than
	// the configured threshold duration.
	StatePending
	// StateFiring is the state of an alert that has been active for longer than
	// the configured threshold duration.
	StateFiring
)

func (s AlertState) String() string {
	switch s {
	case StateInactive:
		return "inactive"
	case StatePending:
		return "pending"
	case StateFiring:
		return "firing"
	}
	panic(fmt.Errorf("unknown alert state: %d", s))
}

// Alert is the user-level representation of a single instance of an alerting rule.
type Alert struct {
	State AlertState

	Labels      labels.Labels
	Annotations labels.Labels

	// The value at the last evaluation of the alerting expression.
	Value float64
	// The interval during which the condition of this alert held true.
	// ResolvedAt will be the zero time to indicate a still active alert.
	ActiveAt        time.Time
	FiredAt         time.Time
	ResolvedAt      time.Time
	LastSentAt      time.Time
	ValidUntil      time.Time
	KeepFiringSince time.Time
}

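// needsSending reports whether the alert should be sent to the Alertmanager at
// ts: pending alerts are never sent, alerts resolved since the last send are
// resent once, and anything else is resent only after resendDelay has passed
// since the last send.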
func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
	if a.State == StatePending {
		return false
	}

	// If an alert has been resolved since the last send, resend it.
	if a.ResolvedAt.After(a.LastSentAt) {
		return true
	}

	return a.LastSentAt.Add(resendDelay).Before(ts)
}

// An AlertingRule generates alerts from its vector expression.
type AlertingRule struct {
	// The name of the alert.
	name string
	// The vector expression from which to generate alerts.
	vector parser.Expr
	// The duration for which a labelset needs to persist in the expression
	// output vector before an alert transitions from Pending to Firing state.
	holdDuration time.Duration
	// The amount of time that the alert should remain firing after the
	// resolution.
	keepFiringFor time.Duration
	// Extra labels to attach to the resulting alert sample vectors.
	labels labels.Labels
	// Non-identifying key/value pairs.
	annotations labels.Labels
	// External labels from the global config.
	externalLabels map[string]string
	// The external URL from the --web.external-url flag.
	externalURL string
	// true if old state has been restored. We start persisting samples for ALERTS_FOR_STATE
	// only after the restoration.
	restored *atomic.Bool
	// Duration of the last rule evaluation.
	evaluationDuration *atomic.Duration
	// Timestamp of the last evaluation of the rule.
	evaluationTimestamp *atomic.Time
	// The health of the alerting rule.
	health *atomic.String
	// The last error seen by the alerting rule.
	lastError *atomic.Error
	// activeMtx protects the `active` map.
	activeMtx sync.Mutex
	// A map of alerts which are currently active (Pending or Firing), keyed by
	// the fingerprint of the labelset they correspond to.
	active map[uint64]*Alert

	logger *slog.Logger

	noDependentRules  *atomic.Bool
	noDependencyRules *atomic.Bool
}

// NewAlertingRule constructs a new AlertingRule.
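//
// A minimal sketch of typical use (expr is a parsed PromQL expression and
// logger an *slog.Logger; both are assumed to exist):
//
//	rule := NewAlertingRule(
//		"HighErrorRate", expr, 5*time.Minute, 0,
//		labels.FromStrings("severity", "page"),
//		labels.EmptyLabels(), labels.EmptyLabels(),
//		"http://localhost:9090", false, logger,
//	)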
func NewAlertingRule(
	name string, vec parser.Expr, hold, keepFiringFor time.Duration,
	labels, annotations, externalLabels labels.Labels, externalURL string,
	restored bool, logger *slog.Logger,
) *AlertingRule {
	el := externalLabels.Map()

	return &AlertingRule{
		name:                name,
		vector:              vec,
		holdDuration:        hold,
		keepFiringFor:       keepFiringFor,
		labels:              labels,
		annotations:         annotations,
		externalLabels:      el,
		externalURL:         externalURL,
		active:              map[uint64]*Alert{},
		logger:              logger,
		restored:            atomic.NewBool(restored),
		health:              atomic.NewString(string(HealthUnknown)),
		evaluationTimestamp: atomic.NewTime(time.Time{}),
		evaluationDuration:  atomic.NewDuration(0),
		lastError:           atomic.NewError(nil),
		noDependentRules:    atomic.NewBool(false),
		noDependencyRules:   atomic.NewBool(false),
	}
}

// Name returns the name of the alerting rule.
func (r *AlertingRule) Name() string {
	return r.name
}

// SetLastError sets the current error seen by the alerting rule.
func (r *AlertingRule) SetLastError(err error) {
	r.lastError.Store(err)
}

// LastError returns the last error seen by the alerting rule.
func (r *AlertingRule) LastError() error {
	return r.lastError.Load()
}

// SetHealth sets the current health of the alerting rule.
func (r *AlertingRule) SetHealth(health RuleHealth) {
	r.health.Store(string(health))
}

// Health returns the current health of the alerting rule.
func (r *AlertingRule) Health() RuleHealth {
	return RuleHealth(r.health.String())
}

// Query returns the query expression of the alerting rule.
func (r *AlertingRule) Query() parser.Expr {
	return r.vector
}

// HoldDuration returns the hold duration of the alerting rule.
func (r *AlertingRule) HoldDuration() time.Duration {
	return r.holdDuration
}

// KeepFiringFor returns the duration an alerting rule should keep firing for
// after resolution.
func (r *AlertingRule) KeepFiringFor() time.Duration {
	return r.keepFiringFor
}

// Labels returns the labels of the alerting rule.
func (r *AlertingRule) Labels() labels.Labels {
	return r.labels
}

// Annotations returns the annotations of the alerting rule.
func (r *AlertingRule) Annotations() labels.Labels {
	return r.annotations
}

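// sample returns a synthetic ALERTS sample (value 1) for the given alert at ts,
// carrying the rule labels, the alert labels, the alertname and the alertstate.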
func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
	lb := labels.NewBuilder(r.labels)

	alert.Labels.Range(func(l labels.Label) {
		lb.Set(l.Name, l.Value)
	})

	lb.Set(labels.MetricName, alertMetricName)
	lb.Set(labels.AlertName, r.name)
	lb.Set(alertStateLabel, alert.State.String())

	s := promql.Sample{
		Metric: lb.Labels(),
		T:      timestamp.FromTime(ts),
		F:      1,
	}
	return s
}

// forStateSample returns a promql.Sample with the rule labels, `ALERTS_FOR_STATE`
// as the metric name, and the rule name as the `alertname` label.
// Optionally, if an alert is provided, its labels are copied into the sample labels.
func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
	lb := labels.NewBuilder(r.labels)

	if alert != nil {
		alert.Labels.Range(func(l labels.Label) {
			lb.Set(l.Name, l.Value)
		})
	}

	lb.Set(labels.MetricName, alertForStateMetricName)
	lb.Set(labels.AlertName, r.name)

	s := promql.Sample{
		Metric: lb.Labels(),
		T:      timestamp.FromTime(ts),
		F:      v,
	}
	return s
}

// QueryForStateSeries returns the series for ALERTS_FOR_STATE of the alert rule.
func (r *AlertingRule) QueryForStateSeries(ctx context.Context, q storage.Querier) (storage.SeriesSet, error) {
	// We use a sample to ease the building of matchers.
	// Don't provide an alert as we want matchers that match all series for the alert rule.
	smpl := r.forStateSample(nil, time.Now(), 0)
	var matchers []*labels.Matcher
	smpl.Metric.Range(func(l labels.Label) {
		mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
		if err != nil {
			panic(err)
		}
		matchers = append(matchers, mt)
	})

	sset := q.Select(ctx, false, nil, matchers...)
	return sset, sset.Err()
}

// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
	r.evaluationDuration.Store(dur)
}

// GetEvaluationDuration returns the duration it took to evaluate the alerting rule.
func (r *AlertingRule) GetEvaluationDuration() time.Duration {
	return r.evaluationDuration.Load()
}

// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
	r.evaluationTimestamp.Store(ts)
}

// GetEvaluationTimestamp returns the time the evaluation took place.
func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
	return r.evaluationTimestamp.Load()
}

// SetRestored updates the restoration state of the alerting rule.
func (r *AlertingRule) SetRestored(restored bool) {
	r.restored.Store(restored)
}

// Restored returns the restoration state of the alerting rule.
func (r *AlertingRule) Restored() bool {
	return r.restored.Load()
}

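// SetNoDependentRules sets whether it is known that no other rules depend on
// the output of this rule.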
func (r *AlertingRule) SetNoDependentRules(noDependentRules bool) {
	r.noDependentRules.Store(noDependentRules)
}

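// NoDependentRules reports whether it is known that no other rules depend on
// the output of this rule.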
func (r *AlertingRule) NoDependentRules() bool {
	return r.noDependentRules.Load()
}

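// SetNoDependencyRules sets whether it is known that this rule depends on the
// output of no other rule.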
func (r *AlertingRule) SetNoDependencyRules(noDependencyRules bool) {
	r.noDependencyRules.Store(noDependencyRules)
}

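// NoDependencyRules reports whether it is known that this rule depends on the
// output of no other rule.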
func (r *AlertingRule) NoDependencyRules() bool {
	return r.noDependencyRules.Load()
}

// resolvedRetention is the duration for which a resolved alert instance
// is kept in memory state and consequently repeatedly sent to the Alertmanager.
const resolvedRetention = 15 * time.Minute

// Eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) {
	ctx = NewOriginContext(ctx, NewRuleDetail(r))
	res, err := query(ctx, r.vector.String(), ts.Add(-queryOffset))
	if err != nil {
		return nil, err
	}

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFPs := map[uint64]struct{}{}

	lb := labels.NewBuilder(labels.EmptyLabels())
	sb := labels.NewScratchBuilder(0)
	var vec promql.Vector
	alerts := make(map[uint64]*Alert, len(res))
	for _, smpl := range res {
		// Provide the alert information to the template.
		l := smpl.Metric.Map()

		tmplData := template.AlertTemplateData(l, r.externalLabels, r.externalURL, smpl)
		// Inject some convenience variables that are easier to remember for users
		// who are not used to Go's templating system.
		defs := []string{
			"{{$labels := .Labels}}",
			"{{$externalLabels := .ExternalLabels}}",
			"{{$externalURL := .ExternalURL}}",
			"{{$value := .Value}}",
		}

		expand := func(text string) string {
			tmpl := template.NewTemplateExpander(
				ctx,
				strings.Join(append(defs, text), ""),
				"__alert_"+r.Name(),
				tmplData,
				model.Time(timestamp.FromTime(ts)),
				template.QueryFunc(query),
				externalURL,
				nil,
			)
			result, err := tmpl.Expand()
			if err != nil {
				result = fmt.Sprintf("<error expanding template: %s>", err)
				r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData)
			}
			return result
		}

		lb.Reset(smpl.Metric)
		lb.Del(labels.MetricName)
		r.labels.Range(func(l labels.Label) {
			lb.Set(l.Name, expand(l.Value))
		})
		lb.Set(labels.AlertName, r.Name())

		sb.Reset()
		r.annotations.Range(func(a labels.Label) {
			sb.Add(a.Name, expand(a.Value))
		})
		annotations := sb.Labels()

		lbs := lb.Labels()
		h := lbs.Hash()
		resultFPs[h] = struct{}{}

		if _, ok := alerts[h]; ok {
			return nil, errors.New("vector contains metrics with the same labelset after applying alert labels")
		}

		alerts[h] = &Alert{
			Labels:      lbs,
			Annotations: annotations,
			ActiveAt:    ts,
			State:       StatePending,
			Value:       smpl.F,
		}
	}

	r.activeMtx.Lock()
	defer r.activeMtx.Unlock()

	for h, a := range alerts {
		// Check whether we already have alerting state for the identifying label set.
		// Update the last value and annotations if so, create a new alert entry otherwise.
		if alert, ok := r.active[h]; ok && alert.State != StateInactive {
			alert.Value = a.Value
			alert.Annotations = a.Annotations
			continue
		}

		r.active[h] = a
	}

	var numActivePending int
	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.active {
		if _, ok := resultFPs[fp]; !ok {
			// There are no firing alerts for this fingerprint. The alert is no
			// longer firing.

			// Use the keepFiringFor value to determine if the alert should keep
			// firing.
			var keepFiring bool
			if a.State == StateFiring && r.keepFiringFor > 0 {
				if a.KeepFiringSince.IsZero() {
					a.KeepFiringSince = ts
				}
				if ts.Sub(a.KeepFiringSince) < r.keepFiringFor {
					keepFiring = true
				}
			}

			// If the alert is resolved (was firing but is now inactive) keep it for
			// at least the retention period. This is important for a number of reasons:
			//
			// 1. It allows for Prometheus to be more resilient to network issues that
			//    would otherwise prevent a resolved alert from being reported as resolved
			//    to Alertmanager.
			//
			// 2. It helps reduce the chance of resolved notifications being lost if
			//    Alertmanager crashes or restarts between receiving the resolved alert
			//    from Prometheus and sending the resolved notification. This tends to
			//    occur for routes with large Group intervals.
			if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
				delete(r.active, fp)
			}
			if a.State != StateInactive && !keepFiring {
				a.State = StateInactive
				a.ResolvedAt = ts
			}
			if !keepFiring {
				continue
			}
		} else {
			// The alert is firing, reset keepFiringSince.
			a.KeepFiringSince = time.Time{}
		}
		numActivePending++

		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
			a.State = StateFiring
			a.FiredAt = ts
		}

		if r.restored.Load() {
			vec = append(vec, r.sample(a, ts.Add(-queryOffset)))
			vec = append(vec, r.forStateSample(a, ts.Add(-queryOffset), float64(a.ActiveAt.Unix())))
		}
	}

	if limit > 0 && numActivePending > limit {
		r.active = map[uint64]*Alert{}
		return nil, fmt.Errorf("exceeded limit of %d with %d alerts", limit, numActivePending)
	}

	return vec, nil
}

// State returns the maximum state of alert instances for this rule.
// StateFiring > StatePending > StateInactive.
func (r *AlertingRule) State() AlertState {
	r.activeMtx.Lock()
	defer r.activeMtx.Unlock()

	maxState := StateInactive
	for _, a := range r.active {
		if a.State > maxState {
			maxState = a.State
		}
	}
	return maxState
}

// ActiveAlerts returns a slice of active alerts.
func (r *AlertingRule) ActiveAlerts() []*Alert {
	var res []*Alert
	for _, a := range r.currentAlerts() {
		if a.ResolvedAt.IsZero() {
			res = append(res, a)
		}
	}
	return res
}

// currentAlerts returns all instances of alerts for this rule. This may include
// inactive alerts that were previously firing.
func (r *AlertingRule) currentAlerts() []*Alert {
	r.activeMtx.Lock()
	defer r.activeMtx.Unlock()

	alerts := make([]*Alert, 0, len(r.active))

	for _, a := range r.active {
		anew := *a
		alerts = append(alerts, &anew)
	}
	return alerts
}

// ForEachActiveAlert runs the given function on each alert.
// It operates on the rule's actual alerts, not on copies; if you want to work
// on a copy of the alerts, use ActiveAlerts() instead.
func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
	r.activeMtx.Lock()
	defer r.activeMtx.Unlock()

	for _, a := range r.active {
		f(a)
	}
}

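// ActiveAlertsCount returns the number of alerts in the rule's active map.
// Note that this may include recently resolved alerts that are still retained
// (see resolvedRetention).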
func (r *AlertingRule) ActiveAlertsCount() int {
	r.activeMtx.Lock()
	defer r.activeMtx.Unlock()

	return len(r.active)
}

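// sendAlerts sends, via notifyFunc, all active alerts that need (re)sending at
// ts. Each sent alert is stamped with LastSentAt and given a ValidUntil of four
// resend/evaluation intervals, leaving room for a couple of missed resends.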
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay, interval time.Duration, notifyFunc NotifyFunc) {
	alerts := []*Alert{}
	r.ForEachActiveAlert(func(alert *Alert) {
		if alert.needsSending(ts, resendDelay) {
			alert.LastSentAt = ts
			// Allow for two Eval or Alertmanager send failures.
			delta := resendDelay
			if interval > resendDelay {
				delta = interval
			}
			alert.ValidUntil = ts.Add(4 * delta)
			anew := *alert
			// The notifier re-uses the labels slice, hence make a copy.
			anew.Labels = alert.Labels.Copy()
			alerts = append(alerts, &anew)
		}
	})
	notifyFunc(ctx, r.vector.String(), alerts...)
}

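// String returns the rule as text in the YAML format used in rule files.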
func (r *AlertingRule) String() string {
	ar := rulefmt.Rule{
		Alert:         r.name,
		Expr:          r.vector.String(),
		For:           model.Duration(r.holdDuration),
		KeepFiringFor: model.Duration(r.keepFiringFor),
		Labels:        r.labels.Map(),
		Annotations:   r.annotations.Map(),
	}

	byt, err := yaml.Marshal(ar)
	if err != nil {
		return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
	}

	return string(byt)
}