@ -54,9 +54,9 @@ type FunctionCall func(vals []parser.Value, args parser.Expressions, enh *EvalNo
// === time() float64 ===
func funcTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return Vector { Sample { Point : Point {
V : float64 ( enh . Ts ) / 1000 ,
} } }
return Vector { Sample {
F : float64 ( enh . Ts ) / 1000 ,
} }
}
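// Illustrative sketch, not part of this change: the flat Sample fields and the
// FPoint/HPoint split that this diff migrates to are assumed to look roughly
// like the following (abbreviated; the authoritative definitions live in
// promql/value.go):
//
//	type FPoint struct { T int64; F float64 }                   // a float sample
//	type HPoint struct { T int64; H *histogram.FloatHistogram } // a native-histogram sample
//	type Sample struct { Metric labels.Labels; T int64; F float64; H *histogram.FloatHistogram }
//	type Series struct { Metric labels.Labels; Floats []FPoint; Histograms []HPoint }
//
// A float result sets F and leaves H nil, as funcTime does above; a histogram
// result sets H instead.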
// extrapolatedRate is a utility function for rate/increase/delta.
@ -72,60 +72,66 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
rangeEnd = enh . Ts - durationMilliseconds ( vs . Offset )
resultValue float64
resultHistogram * histogram . FloatHistogram
firstT , lastT int64
numSamplesMinusOne int
)
// No sense in trying to compute a rate without at least two points. Drop
// this Vector element.
if len ( samples . Points ) < 2 {
// We need either at least two Histograms and no Floats, or at least two
// Floats and no Histograms to calculate a rate. Otherwise, drop this
// Vector element.
if len ( samples . Histograms ) > 0 && len ( samples . Floats ) > 0 {
// Mix of histograms and floats. TODO(beorn7): Communicate this failure reason.
return enh . Out
}
if samples . Points [ 0 ] . H != nil {
resultHistogram = histogramRate ( samples . Points , isCounter )
switch {
case len ( samples . Histograms ) > 1 :
numSamplesMinusOne = len ( samples . Histograms ) - 1
firstT = samples . Histograms [ 0 ] . T
lastT = samples . Histograms [ numSamplesMinusOne ] . T
resultHistogram = histogramRate ( samples . Histograms , isCounter )
if resultHistogram == nil {
// Points are a mix of floats and histograms, or the histograms
// are not compatible with each other.
// TODO(beorn7): find a way of communicating the exact reason
// The histograms are not compatible with each other.
// TODO(beorn7): Communicate this failure reason.
return enh . Out
}
} else {
resultValue = samples . Points [ len ( samples . Points ) - 1 ] . V - samples . Points [ 0 ] . V
prevValue := samples . Points [ 0 ] . V
// We have to iterate through everything even in the non-counter
// case because we have to check that everything is a float.
// TODO(beorn7): Find a way to check that earlier, e.g. by
// handing in a []FloatPoint and a []HistogramPoint separately.
for _ , currPoint := range samples . Points [ 1 : ] {
if currPoint . H != nil {
return nil // Range contains a mix of histograms and floats.
}
case len ( samples . Floats ) > 1 :
numSamplesMinusOne = len ( samples . Floats ) - 1
firstT = samples . Floats [ 0 ] . T
lastT = samples . Floats [ numSamplesMinusOne ] . T
resultValue = samples . Floats [ numSamplesMinusOne ] . F - samples . Floats [ 0 ] . F
if ! isCounter {
continue
break
}
if currPoint . V < prevValue {
// Handle counter resets:
prevValue := samples . Floats [ 0 ] . F
for _ , currPoint := range samples . Floats [ 1 : ] {
if currPoint . F < prevValue {
resultValue += prevValue
}
prevValue = currPoint . V
prevValue = currPoint . F
}
default :
// Not enough samples. TODO(beorn7): Communicate this failure reason.
return enh . Out
}
// Duration between first/last samples and boundary of range.
durationToStart := float64 ( samples . Points [ 0 ] . T - rangeStart ) / 1000
durationToEnd := float64 ( rangeEnd - samples . Points [ len ( samples . Points ) - 1 ] . T ) / 1000
durationToStart := float64 ( firstT - rangeStart ) / 1000
durationToEnd := float64 ( rangeEnd - lastT ) / 1000
sampledInterval := float64 ( samples . Points [ len ( samples . Points ) - 1 ] . T - samples . Points [ 0 ] . T ) / 1000
averageDurationBetweenSamples := sampledInterval / float64 ( len ( samples . Points ) - 1 )
sampledInterval := float64 ( lastT - firstT ) / 1000
averageDurationBetweenSamples := sampledInterval / float64 ( numSamplesMinusOne )
// TODO(beorn7): Do this for histograms, too.
if isCounter && resultValue > 0 && samples . Points [ 0 ] . V >= 0 {
// Counters cannot be negative. If we have any slope at
// all (i.e. resultValue went up), we can extrapolate
// the zero point of the counter. If the duration to the
// zero point is shorter than the durationToStart, we
// take the zero point as the start of the series,
// thereby avoiding extrapolation to negative counter
// values.
durationToZero := sampledInterval * ( samples . Points [ 0 ] . V / resultValue )
if isCounter && resultValue > 0 && len ( samples . Floats ) > 0 && samples . Floats [ 0 ] . F >= 0 {
// Counters cannot be negative. If we have any slope at all
// (i.e. resultValue went up), we can extrapolate the zero point
// of the counter. If the duration to the zero point is shorter
// than the durationToStart, we take the zero point as the start
// of the series, thereby avoiding extrapolation to negative
// counter values.
durationToZero := sampledInterval * ( samples . Floats [ 0 ] . F / resultValue )
if durationToZero < durationToStart {
durationToStart = durationToZero
}
@ -158,16 +164,14 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod
resultHistogram . Scale ( factor )
}
return append ( enh . Out , Sample {
Point : Point { V : resultValue , H : resultHistogram } ,
} )
return append ( enh . Out , Sample { F : resultValue , H : resultHistogram } )
}
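// Illustrative sketch, not part of this change (helper name and numbers are made
// up): the zero-point clamp above in concrete terms. With the first sample 15s
// after the range start, 40s between first and last sample, and a counter going
// from 2 to 10, the counter would extrapolate to zero only 10s before the first
// sample, so backwards extrapolation is limited to 10s instead of 15s.
func exampleDurationToZero() float64 {
	var (
		sampledInterval = 40.0 // seconds between first and last sample
		firstValue      = 2.0  // samples.Floats[0].F
		resultValue     = 8.0  // last value minus first value
		durationToStart = 15.0 // seconds from range start to first sample
	)
	durationToZero := sampledInterval * (firstValue / resultValue) // 10s
	if durationToZero < durationToStart {
		durationToStart = durationToZero
	}
	return durationToStart // 10
}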
// histogramRate is a helper function for extrapolatedRate. It requires
// points[0] to be a histogram. It returns nil if any other Point in points is
// not a histogram.
func histogramRate ( points [ ] Point , isCounter bool ) * histogram . FloatHistogram {
prev := points [ 0 ] . H // We already know that this is a histogram.
func histogramRate ( points [ ] HPoint , isCounter bool ) * histogram . FloatHistogram {
prev := points [ 0 ] . H
last := points [ len ( points ) - 1 ] . H
if last == nil {
return nil // Range contains a mix of histograms and floats.
@ -243,19 +247,19 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector {
samples := vals [ 0 ] . ( Matrix ) [ 0 ]
// No sense in trying to compute a rate without at least two points. Drop
// this Vector element.
if len ( samples . Points ) < 2 {
if len ( samples . Floats ) < 2 {
return out
}
lastSample := samples . Points [ len ( samples . Points ) - 1 ]
previousSample := samples . Points [ len ( samples . Points ) - 2 ]
lastSample := samples . Floats [ len ( samples . Floats ) - 1 ]
previousSample := samples . Floats [ len ( samples . Floats ) - 2 ]
var resultValue float64
if isRate && lastSample . V < previousSample . V {
if isRate && lastSample . F < previousSample . F {
// Counter reset.
resultValue = lastSample . V
resultValue = lastSample . F
} else {
resultValue = lastSample . V - previousSample . V
resultValue = lastSample . F - previousSample . F
}
sampledInterval := lastSample . T - previousSample . T
@ -269,9 +273,7 @@ func instantValue(vals []parser.Value, out Vector, isRate bool) Vector {
resultValue /= float64 ( sampledInterval ) / 1000
}
return append ( out , Sample {
Point : Point { V : resultValue } ,
} )
return append ( out , Sample { F : resultValue } )
}
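// Illustrative sketch, not part of this change (helper name and numbers assumed):
// the last-two-samples arithmetic above. For float values 100 -> 130 observed 15s
// apart, irate yields (130-100)/15 = 2 per second; had the last value dropped
// below the previous one, the drop would be treated as a counter reset and the
// last value itself used as the delta.
func exampleInstantRate(prevF, lastF, intervalSeconds float64) float64 {
	delta := lastF - prevF
	if lastF < prevF { // counter reset
		delta = lastF
	}
	return delta / intervalSeconds
}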
// Calculate the trend value at the given index i in raw data d.
@ -300,10 +302,10 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
samples := vals [ 0 ] . ( Matrix ) [ 0 ]
// The smoothing factor argument.
sf := vals [ 1 ] . ( Vector ) [ 0 ] . V
sf := vals [ 1 ] . ( Vector ) [ 0 ] . F
// The trend factor argument.
tf := vals [ 2 ] . ( Vector ) [ 0 ] . V
tf := vals [ 2 ] . ( Vector ) [ 0 ] . F
// Check that the input parameters are valid.
if sf <= 0 || sf >= 1 {
@ -313,7 +315,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
panic ( fmt . Errorf ( "invalid trend factor. Expected: 0 < tf < 1, got: %f" , tf ) )
}
l := len ( samples . Points )
l := len ( samples . Floats )
// Can't do the smoothing operation with less than two points.
if l < 2 {
@ -322,15 +324,15 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
var s0 , s1 , b float64
// Set initial values.
s1 = samples . Points [ 0 ] . V
b = samples . Points [ 1 ] . V - samples . Points [ 0 ] . V
s1 = samples . Floats [ 0 ] . F
b = samples . Floats [ 1 ] . F - samples . Floats [ 0 ] . F
// Run the smoothing operation.
var x , y float64
for i := 1 ; i < l ; i ++ {
// Scale the raw value against the smoothing factor.
x = sf * samples . Points [ i ] . V
x = sf * samples . Floats [ i ] . F
// Scale the last smoothed value with the trend at this point.
b = calcTrendValue ( i - 1 , tf , s0 , s1 , b )
@ -339,9 +341,7 @@ func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNode
s0 , s1 = s1 , x + y
}
return append ( enh . Out , Sample {
Point : Point { V : s1 } ,
} )
return append ( enh . Out , Sample { F : s1 } )
}
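// Illustrative sketch, not part of this change (helper name assumed): the
// smoothing loop above condensed onto a plain float slice, with calcTrendValue
// inlined. Both sf and tf must lie strictly between 0 and 1; the returned value
// is the final smoothed level s1.
func exampleHoltWinters(data []float64, sf, tf float64) float64 {
	if len(data) < 2 {
		return math.NaN()
	}
	var s0, s1, b float64
	s1 = data[0]
	b = data[1] - data[0]
	for i := 1; i < len(data); i++ {
		if i > 1 {
			b = tf*(s1-s0) + (1-tf)*b // update the trend estimate (kept as-is on the first pass)
		}
		x := sf * data[i]        // scale the raw value by the smoothing factor
		y := (1 - sf) * (s1 + b) // scale the previous smoothed value plus trend
		s0, s1 = s1, x+y
	}
	return s1
}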
// === sort(node parser.ValueTypeVector) Vector ===
@ -365,15 +365,15 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === clamp(Vector parser.ValueTypeVector, min, max Scalar) Vector ===
func funcClamp ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
vec := vals [ 0 ] . ( Vector )
min := vals [ 1 ] . ( Vector ) [ 0 ] . Point . V
max := vals [ 2 ] . ( Vector ) [ 0 ] . Point . V
min := vals [ 1 ] . ( Vector ) [ 0 ] . F
max := vals [ 2 ] . ( Vector ) [ 0 ] . F
if max < min {
return enh . Out
}
for _ , el := range vec {
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : math . Max ( min , math . Min ( max , el . V ) ) } ,
F : math . Max ( min , math . Min ( max , el . F ) ) ,
} )
}
return enh . Out
@ -382,11 +382,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// === clamp_max(Vector parser.ValueTypeVector, max Scalar) Vector ===
func funcClampMax ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
vec := vals [ 0 ] . ( Vector )
max := vals [ 1 ] . ( Vector ) [ 0 ] . Point . V
max := vals [ 1 ] . ( Vector ) [ 0 ] . F
for _ , el := range vec {
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : math . Min ( max , el . V ) } ,
F : math . Min ( max , el . F ) ,
} )
}
return enh . Out
@ -395,11 +395,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
// === clamp_min(Vector parser.ValueTypeVector, min Scalar) Vector ===
func funcClampMin ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
vec := vals [ 0 ] . ( Vector )
min := vals [ 1 ] . ( Vector ) [ 0 ] . Point . V
min := vals [ 1 ] . ( Vector ) [ 0 ] . F
for _ , el := range vec {
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : math . Max ( min , el . V ) } ,
F : math . Max ( min , el . F ) ,
} )
}
return enh . Out
@ -412,16 +412,16 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// Ties are solved by rounding up.
toNearest := float64 ( 1 )
if len ( args ) >= 2 {
toNearest = vals [ 1 ] . ( Vector ) [ 0 ] . Point . V
toNearest = vals [ 1 ] . ( Vector ) [ 0 ] . F
}
// Invert as it seems to cause fewer floating point accuracy issues.
toNearestInverse := 1.0 / toNearest
for _ , el := range vec {
v := math . Floor ( el . V * toNearestInverse + 0.5 ) / toNearestInverse
v := math . Floor ( el . F * toNearestInverse + 0.5 ) / toNearestInverse
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : v } ,
F : v ,
} )
}
return enh . Out
@ -431,37 +431,38 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
func funcScalar ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
v := vals [ 0 ] . ( Vector )
if len ( v ) != 1 {
return append ( enh . Out , Sample {
Point : Point { V : math . NaN ( ) } ,
} )
return append ( enh . Out , Sample { F : math . NaN ( ) } )
}
return append ( enh . Out , Sample {
Point : Point { V : v [ 0 ] . V } ,
} )
return append ( enh . Out , Sample { F : v [ 0 ] . F } )
}
func aggrOverTime ( vals [ ] parser . Value , enh * EvalNodeHelper , aggrFn func ( [ ] Point ) float64 ) Vector {
func aggrOverTime ( vals [ ] parser . Value , enh * EvalNodeHelper , aggrFn func ( Series ) float64 ) Vector {
el := vals [ 0 ] . ( Matrix ) [ 0 ]
return append ( enh . Out , Sample {
Point : Point { V : aggrFn ( el . Points ) } ,
} )
return append ( enh . Out , Sample { F : aggrFn ( el ) } )
}
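// Illustrative sketch, not part of this change (the function is hypothetical):
// with the new signature, an ..._over_time implementation hands aggrOverTime a
// closure over the whole Series and decides itself whether to look at s.Floats
// or s.Histograms. A float-only "range over time" (max minus min) would look
// like this:
func exampleRangeOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	if len(vals[0].(Matrix)[0].Floats) == 0 {
		// Histogram-only input; nothing to aggregate here.
		return enh.Out
	}
	return aggrOverTime(vals, enh, func(s Series) float64 {
		min, max := s.Floats[0].F, s.Floats[0].F
		for _, f := range s.Floats {
			if f.F < min {
				min = f.F
			}
			if f.F > max {
				max = f.F
			}
		}
		return max - min
	})
}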
// === avg_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcAvgOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
if len ( vals [ 0 ] . ( Matrix ) [ 0 ] . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. avg_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
var mean , count , c float64
for _ , v := range values {
for _ , f := range s . Floats {
count ++
if math . IsInf ( mean , 0 ) {
if math . IsInf ( v . V , 0 ) && ( mean > 0 ) == ( v . V > 0 ) {
// The `mean` and `v.V` values are `Inf` of the same sign. They
if math . IsInf ( f . F , 0 ) && ( mean > 0 ) == ( f . F > 0 ) {
// The `mean` and `f.F` values are `Inf` of the same sign. They
// can't be subtracted, but the value of `mean` is correct
// already.
continue
}
if ! math . IsInf ( v . V , 0 ) && ! math . IsNaN ( v . V ) {
if ! math . IsInf ( f . F , 0 ) && ! math . IsNaN ( f . F ) {
// At this stage, the mean is an infinite. If the added
// value is neither an Inf or a Nan, we can keep that mean
// value.
@ -471,7 +472,7 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
continue
}
}
mean , c = kahanSumInc ( v . V / count - mean / count , mean , c )
mean , c = kahanSumInc ( f . F / count - mean / count , mean , c )
}
if math . IsInf ( mean , 0 ) {
@ -483,8 +484,8 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
// === count_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcCountOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
return float64 ( len ( values ) )
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
return float64 ( len ( s . Floats ) + len ( s . Histograms ) )
} )
}
@ -492,19 +493,42 @@ func funcCountOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNo
func funcLastOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
el := vals [ 0 ] . ( Matrix ) [ 0 ]
var f FPoint
if len ( el . Floats ) > 0 {
f = el . Floats [ len ( el . Floats ) - 1 ]
}
var h HPoint
if len ( el . Histograms ) > 0 {
h = el . Histograms [ len ( el . Histograms ) - 1 ]
}
if h . H == nil || h . T < f . T {
return append ( enh . Out , Sample {
Metric : el . Metric ,
Point : Point { V : el . Points [ len ( el . Points ) - 1 ] . V } ,
F : f . F ,
} )
}
return append ( enh . Out , Sample {
Metric : el . Metric ,
H : h . H ,
} )
}
// === max_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcMaxOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
max := values [ 0 ] . V
for _ , v := range values {
if v . V > max || math . IsNaN ( max ) {
max = v . V
if len ( vals [ 0 ] . ( Matrix ) [ 0 ] . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. max_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
max := s . Floats [ 0 ] . F
for _ , f := range s . Floats {
if f . F > max || math . IsNaN ( max ) {
max = f . F
}
}
return max
@ -513,11 +537,18 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
// === min_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcMinOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
min := values [ 0 ] . V
for _ , v := range values {
if v . V < min || math . IsNaN ( min ) {
min = v . V
if len ( vals [ 0 ] . ( Matrix ) [ 0 ] . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. min_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
min := s . Floats [ 0 ] . F
for _ , f := range s . Floats {
if f . F < min || math . IsNaN ( min ) {
min = f . F
}
}
return min
@ -526,10 +557,17 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
// === sum_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcSumOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
if len ( vals [ 0 ] . ( Matrix ) [ 0 ] . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. sum_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
var sum , c float64
for _ , v := range values {
sum , c = kahanSumInc ( v . V , sum , c )
for _ , f := range s . Floats {
sum , c = kahanSumInc ( f . F , sum , c )
}
if math . IsInf ( sum , 0 ) {
return sum
@ -540,29 +578,41 @@ func funcSumOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
// === quantile_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcQuantileOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
q := vals [ 0 ] . ( Vector ) [ 0 ] . V
q := vals [ 0 ] . ( Vector ) [ 0 ] . F
el := vals [ 1 ] . ( Matrix ) [ 0 ]
if len ( el . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. quantile_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
values := make ( vectorByValueHeap , 0 , len ( el . Points ) )
for _ , v := range el . Points {
values = append ( values , Sample { Point : Point { V : v . V } } )
values := make ( vectorByValueHeap , 0 , len ( el . Floats ) )
for _ , f := range el . Floats {
values = append ( values , Sample { F : f . F } )
}
return append ( enh . Out , Sample {
Point : Point { V : quantile ( q , values ) } ,
} )
return append ( enh . Out , Sample { F : quantile ( q , values ) } )
}
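// Illustrative sketch, not part of this change (helper name assumed): the
// compensated summation used via kahanSumInc in avg_over_time, sum_over_time,
// stddev_over_time, and stdvar_over_time above, in isolation. The compensation
// term c accumulates the low-order bits that a naive running sum would drop.
func exampleKahanSum(values []float64) float64 {
	var sum, c float64
	for _, v := range values {
		sum, c = kahanSumInc(v, sum, c)
	}
	return sum + c
}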
// === stddev_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcStddevOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
if len ( vals [ 0 ] . ( Matrix ) [ 0 ] . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. stddev_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
var count float64
var mean , cMean float64
var aux , cAux float64
for _ , v := range values {
for _ , f := range s . Floats {
count ++
delta := v . V - ( mean + cMean )
delta := f . F - ( mean + cMean )
mean , cMean = kahanSumInc ( delta / count , mean , cMean )
aux , cAux = kahanSumInc ( delta * ( v . V - ( mean + cMean ) ) , aux , cAux )
aux , cAux = kahanSumInc ( delta * ( f . F - ( mean + cMean ) ) , aux , cAux )
}
return math . Sqrt ( ( aux + cAux ) / count )
} )
@ -570,15 +620,22 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN
// === stdvar_over_time(Matrix parser.ValueTypeMatrix) Vector ===
func funcStdvarOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
if len ( vals [ 0 ] . ( Matrix ) [ 0 ] . Floats ) == 0 {
// TODO(beorn7): The passed values only contain
// histograms. stdvar_over_time ignores histograms for now. If
// there are only histograms, we have to return without adding
// anything to enh.Out.
return enh . Out
}
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
var count float64
var mean , cMean float64
var aux , cAux float64
for _ , v := range values {
for _ , f := range s . Floats {
count ++
delta := v . V - ( mean + cMean )
delta := f . F - ( mean + cMean )
mean , cMean = kahanSumInc ( delta / count , mean , cMean )
aux , cAux = kahanSumInc ( delta * ( v . V - ( mean + cMean ) ) , aux , cAux )
aux , cAux = kahanSumInc ( delta * ( f . F - ( mean + cMean ) ) , aux , cAux )
}
return ( aux + cAux ) / count
} )
@ -592,7 +649,7 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
return append ( enh . Out ,
Sample {
Metric : createLabelsForAbsentFunction ( args [ 0 ] ) ,
Point : Point { V : 1 } ,
F : 1 ,
} )
}
@ -602,26 +659,25 @@ func funcAbsent(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
// Due to engine optimization, this function is only called when this condition is true.
// Then, the engine post-processes the results to get the expected output.
func funcAbsentOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return append ( enh . Out ,
Sample {
Point : Point { V : 1 } ,
} )
return append ( enh . Out , Sample { F : 1 } )
}
// === present_over_time(Vector parser.ValueTypeMatrix) Vector ===
func funcPresentOverTime ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return aggrOverTime ( vals , enh , func ( values [ ] Point ) float64 {
return aggrOverTime ( vals , enh , func ( s Series ) float64 {
return 1
} )
}
func simpleFunc ( vals [ ] parser . Value , enh * EvalNodeHelper , f func ( float64 ) float64 ) Vector {
for _ , el := range vals [ 0 ] . ( Vector ) {
if el . H == nil { // Process only float samples.
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : f ( el . V ) } ,
F : f ( el . F ) ,
} )
}
}
return enh . Out
}
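// Illustrative sketch, not part of this change (mirrors the existing funcAbs):
// how the float-only guard in simpleFunc is consumed. A unary math function
// simply passes its float64 operation through; histogram samples are skipped
// by the el.H == nil check above.
func exampleAbs(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector {
	return simpleFunc(vals, enh, math.Abs)
}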
@ -741,9 +797,7 @@ func funcDeg(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
// === pi() Scalar ===
func funcPi ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
return Vector { Sample { Point : Point {
V : math . Pi ,
} } }
return Vector { Sample { F : math . Pi } }
}
// === sgn(Vector parser.ValueTypeVector) Vector ===
@ -764,7 +818,7 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
for _ , el := range vec {
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : float64 ( el . T ) / 1000 } ,
F : float64 ( el . T ) / 1000 ,
} )
}
return enh . Out
@ -793,7 +847,7 @@ func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
// linearRegression performs a least-square linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time.
func linearRegression ( samples [ ] Point , interceptTime int64 ) ( slope , intercept float64 ) {
func linearRegression ( samples [ ] FPoint , interceptTime int64 ) ( slope , intercept float64 ) {
var (
n float64
sumX , cX float64
@ -803,18 +857,18 @@ func linearRegression(samples []Point, interceptTime int64) (slope, intercept fl
initY float64
constY bool
)
initY = samples [ 0 ] . V
initY = samples [ 0 ] . F
constY = true
for i , sample := range samples {
// Set constY to false if any new y values are encountered.
if constY && i > 0 && sample . V != initY {
if constY && i > 0 && sample . F != initY {
constY = false
}
n += 1.0
x := float64 ( sample . T - interceptTime ) / 1e3
sumX , cX = kahanSumInc ( x , sumX , cX )
sumY , cY = kahanSumInc ( sample . V , sumY , cY )
sumXY , cXY = kahanSumInc ( x * sample . V , sumXY , cXY )
sumY , cY = kahanSumInc ( sample . F , sumY , cY )
sumXY , cXY = kahanSumInc ( x * sample . F , sumXY , cXY )
sumX2 , cX2 = kahanSumInc ( x * x , sumX2 , cX2 )
}
if constY {
@ -842,33 +896,29 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// No sense in trying to compute a derivative without at least two points.
// Drop this Vector element.
if len ( samples . Points ) < 2 {
if len ( samples . Floats ) < 2 {
return enh . Out
}
// We pass in an arbitrary timestamp that is near the values in use
// to avoid floating point accuracy issues, see
// https://github.com/prometheus/prometheus/issues/2674
slope , _ := linearRegression ( samples . Points , samples . Points [ 0 ] . T )
return append ( enh . Out , Sample {
Point : Point { V : slope } ,
} )
slope , _ := linearRegression ( samples . Floats , samples . Floats [ 0 ] . T )
return append ( enh . Out , Sample { F : slope } )
}
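// Illustrative sketch, not part of this change (helper name assumed): the
// closed-form least-squares fit behind linearRegression, written out for
// running sums over n points with x in seconds relative to the intercept time.
// Passing a timestamp near the samples (as funcDeriv does above) keeps these
// sums small and avoids the floating-point cancellation described in issue 2674.
func exampleLeastSquares(n, sumX, sumY, sumXY, sumX2 float64) (slope, intercept float64) {
	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}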
// === predict_linear(node parser.ValueTypeMatrix, k parser.ValueTypeScalar) Vector ===
func funcPredictLinear ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
samples := vals [ 0 ] . ( Matrix ) [ 0 ]
duration := vals [ 1 ] . ( Vector ) [ 0 ] . V
duration := vals [ 1 ] . ( Vector ) [ 0 ] . F
// No sense in trying to predict anything without at least two points.
// Drop this Vector element.
if len ( samples . Points ) < 2 {
if len ( samples . Floats ) < 2 {
return enh . Out
}
slope , intercept := linearRegression ( samples . Points , enh . Ts )
slope , intercept := linearRegression ( samples . Floats , enh . Ts )
return append ( enh . Out , Sample {
Point : Point { V : slope * duration + intercept } ,
} )
return append ( enh . Out , Sample { F : slope * duration + intercept } )
}
// === histogram_count(Vector parser.ValueTypeVector) Vector ===
@ -882,7 +932,7 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN
}
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( sample . Metric ) ,
Point : Point { V : sample . H . Count } ,
F : sample . H . Count ,
} )
}
return enh . Out
@ -899,7 +949,7 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
}
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( sample . Metric ) ,
Point : Point { V : sample . H . Sum } ,
F : sample . H . Sum ,
} )
}
return enh . Out
@ -907,8 +957,8 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod
// === histogram_fraction(lower, upper parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
func funcHistogramFraction ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
lower := vals [ 0 ] . ( Vector ) [ 0 ] . V
upper := vals [ 1 ] . ( Vector ) [ 0 ] . V
lower := vals [ 0 ] . ( Vector ) [ 0 ] . F
upper := vals [ 1 ] . ( Vector ) [ 0 ] . F
inVec := vals [ 2 ] . ( Vector )
for _ , sample := range inVec {
@ -918,7 +968,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
}
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( sample . Metric ) ,
Point : Point { V : histogramFraction ( lower , upper , sample . H ) } ,
F : histogramFraction ( lower , upper , sample . H ) ,
} )
}
return enh . Out
@ -926,7 +976,7 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev
// === histogram_quantile(k parser.ValueTypeScalar, Vector parser.ValueTypeVector) Vector ===
func funcHistogramQuantile ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
q := vals [ 0 ] . ( Vector ) [ 0 ] . V
q := vals [ 0 ] . ( Vector ) [ 0 ] . F
inVec := vals [ 1 ] . ( Vector )
if enh . signatureToMetricWithBuckets == nil {
@ -965,7 +1015,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
mb = & metricWithBuckets { sample . Metric , nil }
enh . signatureToMetricWithBuckets [ string ( enh . lblBuf ) ] = mb
}
mb . buckets = append ( mb . buckets , bucket { upperBound , sample . V } )
mb . buckets = append ( mb . buckets , bucket { upperBound , sample . F } )
}
@ -985,7 +1035,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( sample . Metric ) ,
Point : Point { V : histogramQuantile ( q , sample . H ) } ,
F : histogramQuantile ( q , sample . H ) ,
} )
}
@ -993,7 +1043,7 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
if len ( mb . buckets ) > 0 {
enh . Out = append ( enh . Out , Sample {
Metric : mb . metric ,
Point : Point { V : bucketQuantile ( q , mb . buckets ) } ,
F : bucketQuantile ( q , mb . buckets ) ,
} )
}
}
@ -1003,40 +1053,55 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev
// === resets(Matrix parser.ValueTypeMatrix) Vector ===
func funcResets ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
samples := vals [ 0 ] . ( Matrix ) [ 0 ]
floats := vals [ 0 ] . ( Matrix ) [ 0 ] . Floats
histograms := vals [ 0 ] . ( Matrix ) [ 0 ] . Histograms
resets := 0
prev := samples . Points [ 0 ] . V
for _ , sample := range samples . Points [ 1 : ] {
current := sample . V
if len ( floats ) > 1 {
prev := floats [ 0 ] . F
for _ , sample := range floats [ 1 : ] {
current := sample . F
if current < prev {
resets ++
}
prev = current
}
}
return append ( enh . Out , Sample {
Point : Point { V : float64 ( resets ) } ,
} )
if len ( histograms ) > 1 {
prev := histograms [ 0 ] . H
for _ , sample := range histograms [ 1 : ] {
current := sample . H
if current . DetectReset ( prev ) {
resets ++
}
prev = current
}
}
return append ( enh . Out , Sample { F : float64 ( resets ) } )
}
// === changes(Matrix parser.ValueTypeMatrix) Vector ===
func funcChanges ( vals [ ] parser . Value , args parser . Expressions , enh * EvalNodeHelper ) Vector {
samples := vals [ 0 ] . ( Matrix ) [ 0 ]
floats := vals [ 0 ] . ( Matrix ) [ 0 ] . Floats
changes := 0
prev := samples . Points [ 0 ] . V
for _ , sample := range samples . Points [ 1 : ] {
current := sample . V
if len ( floats ) == 0 {
// TODO(beorn7): Only histogram values, still need to add support.
return enh . Out
}
prev := floats [ 0 ] . F
for _ , sample := range floats [ 1 : ] {
current := sample . F
if current != prev && ! ( math . IsNaN ( current ) && math . IsNaN ( prev ) ) {
changes ++
}
prev = current
}
return append ( enh . Out , Sample {
Point : Point { V : float64 ( changes ) } ,
} )
return append ( enh . Out , Sample { F : float64 ( changes ) } )
}
// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) Vector ===
@ -1087,7 +1152,8 @@ func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNod
enh . Out = append ( enh . Out , Sample {
Metric : outMetric ,
Point : Point { V : el . Point . V } ,
F : el . F ,
H : el . H ,
} )
}
return enh . Out
@ -1098,7 +1164,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe
return append ( enh . Out ,
Sample {
Metric : labels . Labels { } ,
Point : Point { V : vals [ 0 ] . ( Vector ) [ 0 ] . V } ,
F : vals [ 0 ] . ( Vector ) [ 0 ] . F ,
} )
}
@ -1154,7 +1220,8 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
enh . Out = append ( enh . Out , Sample {
Metric : outMetric ,
Point : Point { V : el . Point . V } ,
F : el . F ,
H : el . H ,
} )
}
return enh . Out
@ -1166,15 +1233,15 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo
return append ( enh . Out ,
Sample {
Metric : labels . Labels { } ,
Point : Point { V : f ( time . Unix ( enh . Ts / 1000 , 0 ) . UTC ( ) ) } ,
F : f ( time . Unix ( enh . Ts / 1000 , 0 ) . UTC ( ) ) ,
} )
}
for _ , el := range vals [ 0 ] . ( Vector ) {
t := time . Unix ( int64 ( el . V ) , 0 ) . UTC ( )
t := time . Unix ( int64 ( el . F ) , 0 ) . UTC ( )
enh . Out = append ( enh . Out , Sample {
Metric : enh . DropMetricName ( el . Metric ) ,
Point : Point { V : f ( t ) } ,
F : f ( t ) ,
} )
}
return enh . Out
@ -1332,10 +1399,20 @@ func (s vectorByValueHeap) Len() int {
}
func ( s vectorByValueHeap ) Less ( i , j int ) bool {
if math . IsNaN ( s [ i ] . V ) {
// We compare histograms based on their sum of observations.
// TODO(beorn7): Is that what we want?
vi , vj := s [ i ] . F , s [ j ] . F
if s [ i ] . H != nil {
vi = s [ i ] . H . Sum
}
if s [ j ] . H != nil {
vj = s [ j ] . H . Sum
}
if math . IsNaN ( vi ) {
return true
}
return s [ i ] . V < s [ j ] . V
return vi < vj
}
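// Illustrative sketch, not part of this change (helper name assumed): the effect
// of the comparison above when sorting a mixed Vector. NaN floats sort first,
// and a histogram sample is ordered by its Sum of observations, so a histogram
// with Sum 2 lands between float samples 1 and 3.
func exampleSortByValue(v Vector) Vector {
	sort.Sort(vectorByValueHeap(v)) // ascending order as defined by Less above
	return v
}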
func ( s vectorByValueHeap ) Swap ( i , j int ) {
@ -1361,10 +1438,20 @@ func (s vectorByReverseValueHeap) Len() int {
}
func ( s vectorByReverseValueHeap ) Less ( i , j int ) bool {
if math . IsNaN ( s [ i ] . V ) {
// We compare histograms based on their sum of observations.
// TODO(beorn7): Is that what we want?
vi , vj := s [ i ] . F , s [ j ] . F
if s [ i ] . H != nil {
vi = s [ i ] . H . Sum
}
if s [ j ] . H != nil {
vj = s [ j ] . H . Sum
}
if math . IsNaN ( vi ) {
return true
}
return s [ i ] . V > s [ j ] . V
return vi > vj
}
func ( s vectorByReverseValueHeap ) Swap ( i , j int ) {