prometheus/rules/alerting.go

// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
"context"
"fmt"
"net/url"
"sync"
"time"
html_template "html/template"
yaml "gopkg.in/yaml.v2"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/rulefmt"
"github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/util/strutil"
)
const (
	// alertMetricName is the metric name for synthetic alert timeseries.
	alertMetricName = "ALERTS"
	// alertForStateMetricName is the metric name for the 'for' state of an alert.
	alertForStateMetricName = "ALERTS_FOR_STATE"
	// alertNameLabel is the label name indicating the name of an alert.
	alertNameLabel = "alertname"
	// alertStateLabel is the label name indicating the state of an alert.
	alertStateLabel = "alertstate"
)
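
// For illustration only (hypothetical rule name "HighErrorRate"): a firing
// alert is exported roughly as the synthetic series
//
//	ALERTS{alertname="HighErrorRate", alertstate="firing", <rule and alert labels>} 1
//	ALERTS_FOR_STATE{alertname="HighErrorRate", <rule and alert labels>} <activation timestamp>
//
// See sample and forStateSample below for how these are built.
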
// AlertState denotes the state of an active alert.
type AlertState int
const (
// StateInactive is the state of an alert that is neither firing nor pending.
StateInactive AlertState = iota
// StatePending is the state of an alert that has been active for less than
// the configured threshold duration.
StatePending
// StateFiring is the state of an alert that has been active for longer than
// the configured threshold duration.
StateFiring
)
func (s AlertState) String() string {
switch s {
case StateInactive:
return "inactive"
case StatePending:
return "pending"
case StateFiring:
return "firing"
}
	panic(fmt.Errorf("unknown alert state: %d", s))
}
// Alert is the user-level representation of a single instance of an alerting rule.
type Alert struct {
State AlertState
Labels labels.Labels
Annotations labels.Labels
// The value at the last evaluation of the alerting expression.
Value float64
// The interval during which the condition of this alert held true.
	// ResolvedAt will be the zero time to indicate a still active alert.
ActiveAt time.Time
FiredAt time.Time
ResolvedAt time.Time
LastSentAt time.Time
ValidUntil time.Time
}
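
// needsSending reports whether the alert should be sent to the notifier at
// time ts. Pending alerts are never sent; an alert that resolved after the
// last notification is sent immediately; otherwise it is resent once
// resendDelay has elapsed since the last notification.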
func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
if a.State == StatePending {
return false
}
	// If an alert has been resolved since the last send, resend it.
if a.ResolvedAt.After(a.LastSentAt) {
return true
}
return a.LastSentAt.Add(resendDelay).Before(ts)
}
// An AlertingRule generates alerts from its vector expression.
type AlertingRule struct {
// The name of the alert.
name string
// The vector expression from which to generate alerts.
vector promql.Expr
// The duration for which a labelset needs to persist in the expression
// output vector before an alert transitions from Pending to Firing state.
holdDuration time.Duration
// Extra labels to attach to the resulting alert sample vectors.
labels labels.Labels
// Non-identifying key/value pairs.
annotations labels.Labels
	// restored is true if old state has been restored. We start persisting samples for ALERTS_FOR_STATE
// only after the restoration.
restored bool
// Protects the below.
mtx sync.Mutex
	// Duration of the last rule evaluation.
evaluationDuration time.Duration
// Timestamp of last evaluation of rule.
evaluationTimestamp time.Time
// The health of the alerting rule.
health RuleHealth
// The last error seen by the alerting rule.
lastError error
// A map of alerts which are currently active (Pending or Firing), keyed by
// the fingerprint of the labelset they correspond to.
active map[uint64]*Alert
logger log.Logger
}
// NewAlertingRule constructs a new AlertingRule.
func NewAlertingRule(name string, vec promql.Expr, hold time.Duration, lbls, anns labels.Labels, restored bool, logger log.Logger) *AlertingRule {
return &AlertingRule{
name: name,
vector: vec,
holdDuration: hold,
labels: lbls,
annotations: anns,
health: HealthUnknown,
active: map[uint64]*Alert{},
logger: logger,
restored: restored,
}
}
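
// A minimal construction sketch (hypothetical name, expression and labels;
// error handling elided):
//
//	expr, _ := promql.ParseExpr(`job:errors:rate5m > 0.5`)
//	rule := NewAlertingRule(
//		"HighErrorRate", expr, 10*time.Minute,
//		labels.FromStrings("severity", "page"), nil,
//		true, log.NewNopLogger(),
//	)
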
// Name returns the name of the alerting rule.
func (r *AlertingRule) Name() string {
return r.name
}
// SetLastError sets the current error seen by the alerting rule.
func (r *AlertingRule) SetLastError(err error) {
r.mtx.Lock()
defer r.mtx.Unlock()
r.lastError = err
}
// LastError returns the last error seen by the alerting rule.
func (r *AlertingRule) LastError() error {
r.mtx.Lock()
defer r.mtx.Unlock()
return r.lastError
}
// SetHealth sets the current health of the alerting rule.
func (r *AlertingRule) SetHealth(health RuleHealth) {
r.mtx.Lock()
defer r.mtx.Unlock()
r.health = health
}
// Health returns the current health of the alerting rule.
func (r *AlertingRule) Health() RuleHealth {
r.mtx.Lock()
defer r.mtx.Unlock()
return r.health
}
// Query returns the query expression of the alerting rule.
func (r *AlertingRule) Query() promql.Expr {
return r.vector
}
// Duration returns the hold duration of the alerting rule.
func (r *AlertingRule) Duration() time.Duration {
return r.holdDuration
}
// Labels returns the labels of the alerting rule.
func (r *AlertingRule) Labels() labels.Labels {
return r.labels
}
// Annotations returns the annotations of the alerting rule.
func (r *AlertingRule) Annotations() labels.Labels {
return r.annotations
}
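
// equal reports whether two alerting rules have the same name and the same
// rule labels.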
func (r *AlertingRule) equal(o *AlertingRule) bool {
return r.name == o.name && labels.Equal(r.labels, o.labels)
}
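
// sample returns a synthetic ALERTS sample for the given alert, carrying the
// rule and alert labels plus __name__, alertname and alertstate, with value 1.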
func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
lb := labels.NewBuilder(r.labels)
for _, l := range alert.Labels {
lb.Set(l.Name, l.Value)
}
lb.Set(labels.MetricName, alertMetricName)
lb.Set(labels.AlertName, r.name)
lb.Set(alertStateLabel, alert.State.String())
s := promql.Sample{
Metric: lb.Labels(),
Point: promql.Point{T: timestamp.FromTime(ts), V: 1},
}
return s
}
// forStateSample returns the ALERTS_FOR_STATE sample for the given alert.
// The value v is typically the alert's activation timestamp, from which the
// 'for' state can be restored after a restart.
func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
lb := labels.NewBuilder(r.labels)
for _, l := range alert.Labels {
lb.Set(l.Name, l.Value)
}
lb.Set(labels.MetricName, alertForStateMetricName)
lb.Set(labels.AlertName, r.name)
s := promql.Sample{
Metric: lb.Labels(),
Point: promql.Point{T: timestamp.FromTime(ts), V: v},
}
return s
}
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
r.mtx.Lock()
defer r.mtx.Unlock()
r.evaluationDuration = dur
}
// GetEvaluationDuration returns the duration it took to evaluate the alerting rule.
func (r *AlertingRule) GetEvaluationDuration() time.Duration {
r.mtx.Lock()
defer r.mtx.Unlock()
return r.evaluationDuration
}
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
r.mtx.Lock()
defer r.mtx.Unlock()
r.evaluationTimestamp = ts
}
// GetEvaluationTimestamp returns the time the evaluation took place.
func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
r.mtx.Lock()
defer r.mtx.Unlock()
return r.evaluationTimestamp
}
// SetRestored updates the restoration state of the alerting rule.
func (r *AlertingRule) SetRestored(restored bool) {
r.restored = restored
}
// resolvedRetention is the duration for which a resolved alert instance
// is kept in memory state and consequently repeatedly sent to the AlertManager.
const resolvedRetention = 15 * time.Minute
// Eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL) (promql.Vector, error) {
res, err := query(ctx, r.vector.String(), ts)
if err != nil {
r.SetHealth(HealthBad)
r.SetLastError(err)
return nil, err
}
r.mtx.Lock()
defer r.mtx.Unlock()
// Create pending alerts for any new vector elements in the alert expression
// or update the expression value for existing elements.
resultFPs := map[uint64]struct{}{}
var vec promql.Vector
for _, smpl := range res {
// Provide the alert information to the template.
l := make(map[string]string, len(smpl.Metric))
for _, lbl := range smpl.Metric {
l[lbl.Name] = lbl.Value
}
tmplData := template.AlertTemplateData(l, smpl.V)
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := "{{$labels := .Labels}}{{$value := .Value}}"
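		// With $labels and $value defined, an annotation or label template such
		// as `{{ $labels.instance }} is at {{ $value }}` (a hypothetical
		// annotation) expands against this sample's labels and value.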
expand := func(text string) string {
tmpl := template.NewTemplateExpander(
ctx,
defs+text,
"__alert_"+r.Name(),
tmplData,
model.Time(timestamp.FromTime(ts)),
template.QueryFunc(query),
externalURL,
)
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData)
}
return result
}
lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricName)
for _, l := range r.labels {
lb.Set(l.Name, expand(l.Value))
}
lb.Set(labels.AlertName, r.Name())
annotations := make(labels.Labels, 0, len(r.annotations))
for _, a := range r.annotations {
annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)})
}
lbs := lb.Labels()
h := lbs.Hash()
resultFPs[h] = struct{}{}
// Check whether we already have alerting state for the identifying label set.
// Update the last value and annotations if so, create a new alert entry otherwise.
if alert, ok := r.active[h]; ok && alert.State != StateInactive {
alert.Value = smpl.V
alert.Annotations = annotations
continue
}
r.active[h] = &Alert{
Labels: lbs,
Annotations: annotations,
ActiveAt: ts,
State: StatePending,
Value: smpl.V,
}
}
// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
for fp, a := range r.active {
if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given
// retention time so it is reported as resolved to the AlertManager.
if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
delete(r.active, fp)
}
if a.State != StateInactive {
a.State = StateInactive
a.ResolvedAt = ts
}
continue
}
if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
a.State = StateFiring
a.FiredAt = ts
}
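		// Synthetic series are only written out once the 'for' state has been
		// restored; the ALERTS_FOR_STATE value is the alert's activation time,
		// from which the 'for' state can be rebuilt after a restart.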
if r.restored {
vec = append(vec, r.sample(a, ts))
vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix())))
}
}
// We have already acquired the lock above hence using SetHealth and
// SetLastError will deadlock.
r.health = HealthGood
r.lastError = err
return vec, nil
}
// State returns the maximum state of alert instances for this rule.
// StateFiring > StatePending > StateInactive
func (r *AlertingRule) State() AlertState {
r.mtx.Lock()
defer r.mtx.Unlock()
maxState := StateInactive
for _, a := range r.active {
if a.State > maxState {
maxState = a.State
}
}
return maxState
}
// ActiveAlerts returns a slice of active alerts.
func (r *AlertingRule) ActiveAlerts() []*Alert {
var res []*Alert
for _, a := range r.currentAlerts() {
if a.ResolvedAt.IsZero() {
res = append(res, a)
}
}
return res
}
// currentAlerts returns all instances of alerts for this rule. This may include
// inactive alerts that were previously firing.
func (r *AlertingRule) currentAlerts() []*Alert {
r.mtx.Lock()
defer r.mtx.Unlock()
alerts := make([]*Alert, 0, len(r.active))
for _, a := range r.active {
anew := *a
alerts = append(alerts, &anew)
}
return alerts
}
// ForEachActiveAlert runs the given function on each alert.
// Use it when you need to operate on the rule's actual alert instances;
// if copies are sufficient, use ActiveAlerts() instead.
func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
r.mtx.Lock()
defer r.mtx.Unlock()
for _, a := range r.active {
f(a)
}
}
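
// sendAlerts hands every alert that is due for (re)sending to notifyFunc,
// stamping LastSentAt and setting ValidUntil to three resend delays or
// evaluation intervals from now, whichever is larger.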
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
alerts := make([]*Alert, 0)
r.ForEachActiveAlert(func(alert *Alert) {
if alert.needsSending(ts, resendDelay) {
alert.LastSentAt = ts
			// Allow for a couple of Eval or Alertmanager send failures.
delta := resendDelay
if interval > resendDelay {
delta = interval
}
alert.ValidUntil = ts.Add(3 * delta)
anew := *alert
alerts = append(alerts, &anew)
}
})
notifyFunc(ctx, r.vector.String(), alerts...)
}
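
// String returns the rule in its YAML rule-file form. For a hypothetical rule
// the output looks roughly like:
//
//	alert: HighErrorRate
//	expr: job:errors:rate5m > 0.5
//	for: 10m
//	labels:
//	  severity: page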
func (r *AlertingRule) String() string {
ar := rulefmt.Rule{
Alert: r.name,
Expr: r.vector.String(),
For: model.Duration(r.holdDuration),
Labels: r.labels.Map(),
Annotations: r.annotations.Map(),
}
byt, err := yaml.Marshal(ar)
if err != nil {
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
}
return string(byt)
}
// HTMLSnippet returns an HTML snippet representing this alerting rule. The
// resulting snippet is expected to be presented in a <pre> element, so that
// line breaks and other returned whitespace is respected.
func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML {
alertMetric := model.Metric{
model.MetricNameLabel: alertMetricName,
alertNameLabel: model.LabelValue(r.name),
}
labels := make(map[string]string, len(r.labels))
for _, l := range r.labels {
labels[l.Name] = html_template.HTMLEscapeString(l.Value)
}
annotations := make(map[string]string, len(r.annotations))
for _, l := range r.annotations {
annotations[l.Name] = html_template.HTMLEscapeString(l.Value)
}
ar := rulefmt.Rule{
Alert: fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name),
Expr: fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())),
For: model.Duration(r.holdDuration),
Labels: labels,
Annotations: annotations,
}
byt, err := yaml.Marshal(ar)
if err != nil {
return html_template.HTML(fmt.Sprintf("error marshaling alerting rule: %q", html_template.HTMLEscapeString(err.Error())))
}
return html_template.HTML(byt)
}
// HoldDuration returns the holdDuration of the alerting rule.
func (r *AlertingRule) HoldDuration() time.Duration {
return r.holdDuration
}