Merge pull request #13725 from prometheus/beorn7/promql2

promql: Fix limiting of extrapolation to negative values
Björn Rabenstein authored 9 months ago, committed by GitHub
commit b0c0961f9d

@@ -128,7 +128,16 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 	sampledInterval := float64(lastT-firstT) / 1000
 	averageDurationBetweenSamples := sampledInterval / float64(numSamplesMinusOne)
 
-	// TODO(beorn7): Do this for histograms, too.
+	// If the first/last samples are close to the boundaries of the range,
+	// extrapolate the result. This is as we expect that another sample
+	// will exist given the spacing between samples we've seen thus far,
+	// with an allowance for noise.
+	extrapolationThreshold := averageDurationBetweenSamples * 1.1
+	extrapolateToInterval := sampledInterval
+
+	if durationToStart >= extrapolationThreshold {
+		durationToStart = averageDurationBetweenSamples / 2
+	}
 	if isCounter && resultFloat > 0 && len(samples.Floats) > 0 && samples.Floats[0].F >= 0 {
 		// Counters cannot be negative. If we have any slope at all
 		// (i.e. resultFloat went up), we can extrapolate the zero point
@@ -136,29 +145,19 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
 		// than the durationToStart, we take the zero point as the start
 		// of the series, thereby avoiding extrapolation to negative
 		// counter values.
+		// TODO(beorn7): Do this for histograms, too.
 		durationToZero := sampledInterval * (samples.Floats[0].F / resultFloat)
 		if durationToZero < durationToStart {
 			durationToStart = durationToZero
 		}
 	}
+	extrapolateToInterval += durationToStart
 
-	// If the first/last samples are close to the boundaries of the range,
-	// extrapolate the result. This is as we expect that another sample
-	// will exist given the spacing between samples we've seen thus far,
-	// with an allowance for noise.
-	extrapolationThreshold := averageDurationBetweenSamples * 1.1
-	extrapolateToInterval := sampledInterval
-	if durationToStart < extrapolationThreshold {
-		extrapolateToInterval += durationToStart
-	} else {
-		extrapolateToInterval += averageDurationBetweenSamples / 2
-	}
-	if durationToEnd < extrapolationThreshold {
-		extrapolateToInterval += durationToEnd
-	} else {
-		extrapolateToInterval += averageDurationBetweenSamples / 2
-	}
+	if durationToEnd >= extrapolationThreshold {
+		durationToEnd = averageDurationBetweenSamples / 2
+	}
+	extrapolateToInterval += durationToEnd
 
 	factor := extrapolateToInterval / sampledInterval
 	if isRate {
 		factor /= ms.Range.Seconds()
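
To trace the new control flow in isolation, here is a minimal standalone Go sketch of the extrapolation logic as it looks after this change. The function extrapolatedFactor and its flat scalar parameters are hypothetical simplifications for illustration; in Prometheus the logic lives inside extrapolatedRate and reads the sample slice directly.

package main

import "fmt"

// extrapolatedFactor is a simplified, hypothetical stand-in for the
// extrapolation logic in extrapolatedRate after this change. All
// durations are in seconds; firstValue and delta play the roles of
// samples.Floats[0].F and resultFloat.
func extrapolatedFactor(sampledInterval, avgSpacing, durationToStart, durationToEnd, firstValue, delta float64, isCounter bool) float64 {
	extrapolationThreshold := avgSpacing * 1.1
	extrapolateToInterval := sampledInterval

	// Clamp the start-side extrapolation to half a sample interval
	// first ...
	if durationToStart >= extrapolationThreshold {
		durationToStart = avgSpacing / 2
	}
	// ... so that the zero cutoff below can now also shorten the
	// half-interval extrapolation (this ordering is the fix).
	if isCounter && delta > 0 && firstValue >= 0 {
		durationToZero := sampledInterval * (firstValue / delta)
		if durationToZero < durationToStart {
			durationToStart = durationToZero
		}
	}
	extrapolateToInterval += durationToStart

	if durationToEnd >= extrapolationThreshold {
		durationToEnd = avgSpacing / 2
	}
	extrapolateToInterval += durationToEnd

	return extrapolateToInterval / sampledInterval
}

func main() {
	// Mirrors the "bumms" fixture from the tests below: samples every
	// 300s over 3000s, first value 1, raw increase 100, evaluated over
	// [100m], so the first sample sits 3000s inside the range and the
	// last one right at its end.
	fmt.Println(100 * extrapolatedFactor(3000, 300, 3000, 0, 1, 100, true)) // 101
}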

@@ -71,15 +71,28 @@ clear
 load 5m
 	http_requests{path="/foo"}	0+10x10
 	http_requests{path="/bar"}	0+10x5 0+10x5
+	http_requests{path="/dings"}	10+10x10
+	http_requests{path="/bumms"}	1+10x10
 
 # Tests for increase().
 eval instant at 50m increase(http_requests[50m])
 	{path="/foo"} 100
 	{path="/bar"} 90
+	{path="/dings"} 100
+	{path="/bumms"} 100
 
+# "foo" and "bar" are already at value 0 at t=0, so no extrapolation
+# happens. "dings" has value 10 at t=0 and would reach 0 at t=-5m. The
+# normal extrapolation by half a sample interval only goes to
+# t=-2m30s, so that's not yet reaching a negative value and therefore
+# chosen. However, "bumms" has value 1 at t=0 and would reach 0 at
+# t=-30s. Here the extrapolation to t=-2m30s would reach a negative
+# value, and therefore the extrapolation happens only by 30s.
 eval instant at 50m increase(http_requests[100m])
 	{path="/foo"} 100
 	{path="/bar"} 90
+	{path="/dings"} 105
+	{path="/bumms"} 101
 
 clear
 
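As a back-of-the-envelope check of the two new expectations (illustrative constant arithmetic only; the durations follow from the 5m sample spacing in the load block, and the last sample coincides with the end of the range, so nothing is extrapolated on that side):

package main

import "fmt"

func main() {
	// "dings": raw increase 100 over 3000s, samples every 300s. The
	// zero point lies 300s before the first sample, so the clamped
	// half-interval extrapolation of 150s is allowed in full.
	fmt.Println(100 * (3000 + 150) / 3000.0) // 105
	// "bumms": the zero point lies only 30s before the first sample,
	// so the start-side extrapolation is capped at 30s.
	fmt.Println(100 * (3000 + 30) / 3000.0) // 101
}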
@@ -133,13 +146,15 @@ load 4m
 	testcounter_zero_cutoff{start="4m"}	240+240x10
 	testcounter_zero_cutoff{start="5m"}	300+240x10
 
-# Zero cutoff for left-side extrapolation.
+# Zero cutoff for left-side extrapolation happens until we
+# reach half a sampling interval (2m). Beyond that, we only
+# extrapolate by half a sampling interval.
 eval instant at 10m rate(testcounter_zero_cutoff[20m])
 	{start="0m"} 0.5
 	{start="1m"} 0.55
 	{start="2m"} 0.6
-	{start="3m"} 0.65
-	{start="4m"} 0.7
+	{start="3m"} 0.6
+	{start="4m"} 0.6
 	{start="5m"} 0.6
 
 # Normal half-interval cutoff for left-side extrapolation.
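
The start="3m" case is the instructive one to verify by hand; the following illustrative arithmetic (durations derived from the 4m load interval) contrasts the old and new results:

package main

import "fmt"

func main() {
	// start="3m" over [20m] at t=10m: three samples at t=0, 4m, 8m,
	// so sampledInterval is 480s, average spacing 240s, raw delta 480,
	// durationToStart 600s, durationToEnd 120s, and the zero point
	// sits 180s before the first sample.
	// Before: the zero cutoff set durationToStart to 180s, which was
	// below the 264s threshold and so was extrapolated in full.
	fmt.Println(480 * (480 + 180 + 120) / 480.0 / 1200) // 0.65
	// After: durationToStart is clamped to 120s (half a sample
	// interval) first, and the 180s zero cutoff can only shorten it.
	fmt.Println(480 * (480 + 120 + 120) / 480.0 / 1200) // 0.6
}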
