mirror of https://github.com/prometheus/prometheus
expand tests and support conversion to nhcb in the middle of scrape
Signed-off-by: Jeanette Tan <jeanette.tan@grafana.com>
pull/14978/head
parent 41c7f7d352
commit cd498964e6
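In outline: NhcbParser wraps another Parser and collates the classic histogram series of a metric (_bucket, _count, _sum) into a native histogram with custom buckets (NHCB). Before this change the converted histogram was only emitted once the wrapped parser reached EOF; now the conversion can also fire in the middle of a scrape, as soon as a series arrives that no longer belongs to the histogram being collated. The diff below implements this by holding back the wrapped parser's current entry while the synthesized histogram is returned first. A minimal sketch of that replay pattern, with hypothetical names (wrapper, pending, ready are illustrative, not the real parser's fields):

package main

import "fmt"

type Entry int

const (
    EntrySeries Entry = iota
    EntryHistogram
)

// wrapper stands in for NhcbParser: it can surface one synthesized
// histogram entry and then replay the wrapped parser's pending entry.
type wrapper struct {
    next         func() (Entry, error) // the wrapped parser's Next
    justInserted bool                  // a converted histogram was just returned
    pending      Entry                 // the real entry to replay afterwards
    ready        bool                  // a collated histogram is ready to emit
}

func (w *wrapper) Next() (Entry, error) {
    if w.justInserted {
        // The caller has consumed the synthesized histogram;
        // now hand over the entry we held back.
        w.justInserted = false
        return w.pending, nil
    }
    et, err := w.next()
    if err != nil {
        return et, err
    }
    if w.ready {
        // Hold back the real entry and emit the conversion first.
        w.pending = et
        w.justInserted = true
        w.ready = false
        return EntryHistogram, nil
    }
    return et, err
}

func main() {
    w := &wrapper{next: func() (Entry, error) { return EntrySeries, nil }, ready: true}
    e1, _ := w.Next()
    e2, _ := w.Next()
    fmt.Println(e1 == EntryHistogram, e2 == EntrySeries) // true true
}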
@@ -45,14 +45,30 @@ type NhcbParser struct {
     lset         labels.Labels
     metricString string
 
+    // Caches the entry itself if we are inserting a converted NHCB
+    // halfway through.
+    entry            Entry
+    justInsertedNhcb bool
+    // Caches the values and metric for the inserted converted NHCB.
+    bytesNhcb        []byte
+    hNhcb            *histogram.Histogram
+    fhNhcb           *histogram.FloatHistogram
+    lsetNhcb         labels.Labels
+    metricStringNhcb string
+
     // Collates values from the classic histogram series to build
     // the converted histogram later.
-    lsetNhcb labels.Labels
-    tempNhcb convertnhcb.TempHistogram
+    tempLsetNhcb          labels.Labels
+    tempNhcb              convertnhcb.TempHistogram
+    isCollationInProgress bool
 
     // Remembers the last native histogram name so we can ignore
     // conversions to NHCB when the name is the same.
     lastNativeHistName string
+    // Remembers the last base histogram metric name (assuming it's
+    // a classic histogram) so we can tell if the next float series
+    // is part of the same classic histogram.
+    lastBaseHistName string
 }
 
 func NewNhcbParser(p Parser, keepClassicHistograms bool) Parser {
@@ -68,6 +84,9 @@ func (p *NhcbParser) Series() ([]byte, *int64, float64) {
 }
 
 func (p *NhcbParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) {
+    if p.justInsertedNhcb {
+        return p.bytesNhcb, p.ts, p.hNhcb, p.fhNhcb
+    }
     return p.bytes, p.ts, p.h, p.fh
 }
@@ -88,6 +107,10 @@ func (p *NhcbParser) Comment() []byte {
 }
 
 func (p *NhcbParser) Metric(l *labels.Labels) string {
+    if p.justInsertedNhcb {
+        *l = p.lsetNhcb
+        return p.metricStringNhcb
+    }
     *l = p.lset
     return p.metricString
 }
@@ -101,9 +124,19 @@ func (p *NhcbParser) CreatedTimestamp() *int64 {
 }
 
 func (p *NhcbParser) Next() (Entry, error) {
+    if p.justInsertedNhcb {
+        p.justInsertedNhcb = false
+        if p.entry == EntrySeries {
+            if isNhcb := p.handleClassicHistogramSeries(p.lset); isNhcb && !p.keepClassicHistograms {
+                return p.Next()
+            }
+        }
+        return p.entry, nil
+    }
     et, err := p.parser.Next()
-    if errors.Is(err, io.EOF) {
-        if p.processNhcb(p.tempNhcb) {
+    if err != nil {
+        if errors.Is(err, io.EOF) && p.processNhcb() {
+            p.entry = et
             return EntryHistogram, nil
         }
         return EntryInvalid, err
@@ -112,6 +145,16 @@
     case EntrySeries:
         p.bytes, p.ts, p.value = p.parser.Series()
        p.metricString = p.parser.Metric(&p.lset)
+        histBaseName := convertnhcb.GetHistogramMetricBaseName(p.lset)
+        if histBaseName == p.lastNativeHistName {
+            break
+        }
+        shouldInsertNhcb := p.lastBaseHistName != "" && p.lastBaseHistName != histBaseName
+        p.lastBaseHistName = histBaseName
+        if shouldInsertNhcb && p.processNhcb() {
+            p.entry = et
+            return EntryHistogram, nil
+        }
         if isNhcb := p.handleClassicHistogramSeries(p.lset); isNhcb && !p.keepClassicHistograms {
             return p.Next()
         }
@@ -119,6 +162,15 @@
         p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
         p.metricString = p.parser.Metric(&p.lset)
         p.lastNativeHistName = p.lset.Get(labels.MetricName)
+        if p.processNhcb() {
+            p.entry = et
+            return EntryHistogram, nil
+        }
+    default:
+        if p.processNhcb() {
+            p.entry = et
+            return EntryHistogram, nil
+        }
     }
     return et, err
 }
@@ -129,9 +181,6 @@
 // right before the classic histograms) and returns true if the collation was done.
 func (p *NhcbParser) handleClassicHistogramSeries(lset labels.Labels) bool {
     mName := lset.Get(labels.MetricName)
-    if convertnhcb.GetHistogramMetricBaseName(mName) == p.lastNativeHistName {
-        return false
-    }
     switch {
     case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel):
         le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64)
@@ -156,40 +205,43 @@ func (p *NhcbParser) handleClassicHistogramSeries(lset labels.Labels) bool {
 }
 
 func (p *NhcbParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) {
-    p.lsetNhcb = convertnhcb.GetHistogramMetricBase(lset, suffix)
+    p.isCollationInProgress = true
+    p.tempLsetNhcb = convertnhcb.GetHistogramMetricBase(lset, suffix)
     updateHist(&p.tempNhcb)
 }
 
 // processNhcb converts the collated classic histogram series to NHCB and caches the info
 // to be returned to callers.
-func (p *NhcbParser) processNhcb(th convertnhcb.TempHistogram) bool {
-    if len(th.BucketCounts) == 0 {
+func (p *NhcbParser) processNhcb() bool {
+    if !p.isCollationInProgress {
         return false
     }
-    ub := make([]float64, 0, len(th.BucketCounts))
-    for b := range th.BucketCounts {
+    ub := make([]float64, 0, len(p.tempNhcb.BucketCounts))
+    for b := range p.tempNhcb.BucketCounts {
         ub = append(ub, b)
     }
     upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false)
     fhBase := hBase.ToFloat(nil)
-    h, fh := convertnhcb.ConvertHistogramWrapper(th, upperBounds, hBase, fhBase)
+    h, fh := convertnhcb.ConvertHistogramWrapper(p.tempNhcb, upperBounds, hBase, fhBase)
     if h != nil {
         if err := h.Validate(); err != nil {
             return false
         }
-        p.h = h
-        p.fh = nil
+        p.hNhcb = h
+        p.fhNhcb = nil
     } else if fh != nil {
         if err := fh.Validate(); err != nil {
             return false
         }
-        p.h = nil
-        p.fh = fh
+        p.hNhcb = nil
+        p.fhNhcb = fh
     }
     buf := make([]byte, 0, 1024)
-    p.bytes = p.lsetNhcb.Bytes(buf)
-    p.lset = p.lsetNhcb
-    p.metricString = p.lsetNhcb.String()
+    p.bytesNhcb = p.tempLsetNhcb.Bytes(buf)
+    p.lsetNhcb = p.tempLsetNhcb
+    p.metricStringNhcb = p.tempLsetNhcb.String()
     p.tempNhcb = convertnhcb.NewTempHistogram()
+    p.isCollationInProgress = false
+    p.justInsertedNhcb = true
     return true
 }
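Context for processNhcb above, stated independently of the diff: classic _bucket series carry cumulative counts (each le bucket includes every observation below its bound), while a custom-bucket native histogram stores sorted upper bounds with one non-cumulative count per bucket. A self-contained sketch of that reshaping, assuming the collated counts sit in a map keyed by upper bound (as the range over p.tempNhcb.BucketCounts above suggests; the real work happens in the convertnhcb helpers):

package main

import (
    "fmt"
    "sort"
)

// classicToPerBucket turns cumulative classic-histogram bucket counts,
// keyed by upper bound ("le"), into sorted upper bounds plus per-bucket
// deltas, which is the shape a custom-bucket native histogram stores.
func classicToPerBucket(cumulative map[float64]float64) ([]float64, []float64) {
    bounds := make([]float64, 0, len(cumulative))
    for ub := range cumulative {
        bounds = append(bounds, ub) // map iteration order is random, so sort below
    }
    sort.Float64s(bounds)
    deltas := make([]float64, len(bounds))
    prev := 0.0
    for i, ub := range bounds {
        deltas[i] = cumulative[ub] - prev
        prev = cumulative[ub]
    }
    return bounds, deltas
}

func main() {
    bounds, deltas := classicToPerBucket(map[float64]float64{
        0.5: 2, 1: 5, 10: 6, // le="0.5" 2, le="1" 5, le="10" 6
    })
    fmt.Println(bounds, deltas) // [0.5 1 10] [2 3 1]
}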
@@ -3371,80 +3371,118 @@ test_summary_count 199
 
 // Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
 func TestConvertClassicHistograms(t *testing.T) {
-    simpleStorage := teststorage.New(t)
-    defer simpleStorage.Close()
-
-    config := &config.ScrapeConfig{
-        JobName:                  "test",
-        SampleLimit:              100,
-        Scheme:                   "http",
-        ScrapeInterval:           model.Duration(100 * time.Millisecond),
-        ScrapeTimeout:            model.Duration(100 * time.Millisecond),
-        ScrapeClassicHistograms:  true,
-        ConvertClassicHistograms: true,
+    metricsTexts := map[string]string{
+        "normal": `
+# HELP test_metric_1 some help text
+# TYPE test_metric_1 counter
+test_metric_1 1
+# HELP test_histogram_1 This is a histogram with default buckets
+# TYPE test_histogram_1 histogram
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="1"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="5"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="10"} 1
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
+test_histogram_1_sum{address="0.0.0.0",port="5001"} 10
+test_histogram_1_count{address="0.0.0.0",port="5001"} 1
+# HELP test_metric_2 some help text
+# TYPE test_metric_2 counter
+test_metric_2 1
+# HELP test_histogram_2 This is a histogram with default buckets
+# TYPE test_histogram_2 histogram
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="1"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="5"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="10"} 1
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
+test_histogram_2_sum{address="0.0.0.0",port="5001"} 10
+test_histogram_2_count{address="0.0.0.0",port="5001"} 1
+# HELP test_metric_3 some help text
+# TYPE test_metric_3 counter
+test_metric_3 1
+# HELP test_histogram_3 This is a histogram with default buckets
+# TYPE test_histogram_3 histogram
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="1"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="5"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="10"} 1
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
+test_histogram_3_sum{address="0.0.0.0",port="5001"} 10
+test_histogram_3_count{address="0.0.0.0",port="5001"} 1
+`,
+        "no metadata and different order": `
+test_metric_1 1
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="1"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="5"} 0
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="10"} 1
+test_histogram_1_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
+test_histogram_1_sum{address="0.0.0.0",port="5001"} 10
+test_histogram_1_count{address="0.0.0.0",port="5001"} 1
+test_metric_2 1
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="1"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="5"} 0
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="10"} 1
+test_histogram_2_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
+test_histogram_2_sum{address="0.0.0.0",port="5001"} 10
+test_histogram_2_count{address="0.0.0.0",port="5001"} 1
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="1"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="5"} 0
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="10"} 1
+test_histogram_3_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
+test_histogram_3_sum{address="0.0.0.0",port="5001"} 10
+test_histogram_3_count{address="0.0.0.0",port="5001"} 1
+test_metric_3 1
+`,
     }
 
-    metricsText := `
-# HELP test_metric some help text
-# TYPE test_metric counter
-test_metric 1
-# HELP test_histogram This is a histogram with default buckets
-# TYPE test_histogram histogram
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.005"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.01"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.025"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.05"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.1"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.25"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="0.5"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="1"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="2.5"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="5"} 0
-test_histogram_bucket{address="0.0.0.0",port="5001",le="10"} 1
-test_histogram_bucket{address="0.0.0.0",port="5001",le="+Inf"} 1
-test_histogram_sum{address="0.0.0.0",port="5001"} 10
-test_histogram_count{address="0.0.0.0",port="5001"} 1
-`
-
-    // The expected "le" values do not have the trailing ".0".
-    expectedLeValues := []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1", "2.5", "5", "10", "+Inf"}
-
-    scrapeCount := 0
-    scraped := make(chan bool)
-
-    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-        fmt.Fprint(w, metricsText)
-        scrapeCount++
-        if scrapeCount > 2 {
-            close(scraped)
-        }
-    }))
-    defer ts.Close()
-
-    sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
-    require.NoError(t, err)
-    defer sp.stop()
-
-    testURL, err := url.Parse(ts.URL)
-    require.NoError(t, err)
-    sp.Sync([]*targetgroup.Group{
-        {
-            Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
-        },
-    })
-    require.Len(t, sp.ActiveTargets(), 1)
-
-    select {
-    case <-time.After(5 * time.Second):
-        t.Fatalf("target was not scraped")
-    case <-scraped:
-    }
-
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
-    require.NoError(t, err)
-    defer q.Close()
+    expectedLeValuesCorrect := []string{"0.005", "0.01", "0.025", "0.05", "0.1", "0.25", "0.5", "1", "2.5", "5", "10", "+Inf"}
+    expectedLeValuesNone := []string{}
 
     checkValues := func(labelName string, expectedValues []string, series storage.SeriesSet) {
         foundLeValues := map[string]bool{}
@@ -3463,7 +3501,7 @@ test_histogram_count{address="0.0.0.0",port="5001"} 1
     }
 
     // Checks that the expected series is present and runs a basic sanity check of the values.
-    checkSeries := func(series storage.SeriesSet, encType chunkenc.ValueType) {
+    checkSeries := func(series storage.SeriesSet, encType chunkenc.ValueType, expectedCount int) {
        count := 0
         for series.Next() {
             i := series.At().Iterator(nil)
@@ -3482,17 +3520,121 @@ test_histogram_count{address="0.0.0.0",port="5001"} 1
             }
             count++
         }
-        require.Equal(t, 1, count, "number of series not as expected")
+        require.Equal(t, expectedCount, count, "number of series not as expected")
     }
 
-    series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_bucket"))
-    checkValues("le", expectedLeValues, series)
-
-    series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram"))
-    checkSeries(series, chunkenc.ValHistogram)
-
-    series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_metric"))
-    checkSeries(series, chunkenc.ValFloat)
+    for metricsTextName, metricsText := range metricsTexts {
+        for name, tc := range map[string]struct {
+            scrapeClassicHistograms  bool
+            convertClassicHistograms bool
+            expectedLeValues         []string
+            expectedNhcbCount        int
+        }{
+            "convert with scrape": {
+                scrapeClassicHistograms:  true,
+                convertClassicHistograms: true,
+                expectedLeValues:         expectedLeValuesCorrect,
+                expectedNhcbCount:        1,
+            },
+            "convert without scrape": {
+                scrapeClassicHistograms:  false,
+                convertClassicHistograms: true,
+                expectedLeValues:         expectedLeValuesNone,
+                expectedNhcbCount:        1,
+            },
+            "scrape without convert": {
+                scrapeClassicHistograms:  true,
+                convertClassicHistograms: false,
+                expectedLeValues:         expectedLeValuesCorrect,
+                expectedNhcbCount:        0,
+            },
+            "neither scrape nor convert": {
+                scrapeClassicHistograms:  false,
+                convertClassicHistograms: false,
+                expectedLeValues:         expectedLeValuesCorrect, // since these are sent without native histograms
+                expectedNhcbCount:        0,
+            },
+        } {
+            t.Run(fmt.Sprintf("%s with %s", name, metricsTextName), func(t *testing.T) {
+                simpleStorage := teststorage.New(t)
+                defer simpleStorage.Close()
+
+                config := &config.ScrapeConfig{
+                    JobName:                  "test",
+                    SampleLimit:              100,
+                    Scheme:                   "http",
+                    ScrapeInterval:           model.Duration(100 * time.Millisecond),
+                    ScrapeTimeout:            model.Duration(100 * time.Millisecond),
+                    ScrapeClassicHistograms:  tc.scrapeClassicHistograms,
+                    ConvertClassicHistograms: tc.convertClassicHistograms,
+                }
+
+                scrapeCount := 0
+                scraped := make(chan bool)
+
+                ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+                    fmt.Fprint(w, metricsText)
+                    scrapeCount++
+                    if scrapeCount > 2 {
+                        close(scraped)
+                    }
+                }))
+                defer ts.Close()
+
+                sp, err := newScrapePool(config, simpleStorage, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
+                require.NoError(t, err)
+                defer sp.stop()
+
+                testURL, err := url.Parse(ts.URL)
+                require.NoError(t, err)
+                sp.Sync([]*targetgroup.Group{
+                    {
+                        Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(testURL.Host)}},
+                    },
+                })
+                require.Len(t, sp.ActiveTargets(), 1)
+
+                select {
+                case <-time.After(5 * time.Second):
+                    t.Fatalf("target was not scraped")
+                case <-scraped:
+                }
+
+                ctx, cancel := context.WithCancel(context.Background())
+                defer cancel()
+                q, err := simpleStorage.Querier(time.Time{}.UnixNano(), time.Now().UnixNano())
+                require.NoError(t, err)
+                defer q.Close()
+
+                series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_metric_1"))
+                checkSeries(series, chunkenc.ValFloat, 1)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_1_bucket"))
+                checkValues("le", tc.expectedLeValues, series)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_1"))
+                checkSeries(series, chunkenc.ValHistogram, tc.expectedNhcbCount)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_metric_2"))
+                checkSeries(series, chunkenc.ValFloat, 1)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_2_bucket"))
+                checkValues("le", tc.expectedLeValues, series)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_2"))
+                checkSeries(series, chunkenc.ValHistogram, tc.expectedNhcbCount)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_metric_3"))
+                checkSeries(series, chunkenc.ValFloat, 1)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_3_bucket"))
+                checkValues("le", tc.expectedLeValues, series)
+
+                series = q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "test_histogram_3"))
+                checkSeries(series, chunkenc.ValHistogram, tc.expectedNhcbCount)
+            })
+        }
+    }
 }
 
 func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *testing.T) {
@@ -164,7 +164,8 @@ func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels {
         Labels()
 }
 
-func GetHistogramMetricBaseName(s string) string {
+func GetHistogramMetricBaseName(m labels.Labels) string {
+    s := m.Get(labels.MetricName)
     for _, rep := range histogramNameSuffixReplacements {
         s = rep.pattern.ReplaceAllString(s, rep.repl)
     }
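For readers outside this file: GetHistogramMetricBaseName now takes the whole label set and extracts __name__ itself, and NhcbParser uses the result to decide whether the next float series still belongs to the classic histogram it is collating. A standalone approximation of the suffix stripping (the real implementation applies the histogramNameSuffixReplacements regexp table, which this diff does not show):

package main

import (
    "fmt"
    "strings"
)

// baseName approximates GetHistogramMetricBaseName: series named
// foo_bucket, foo_count, and foo_sum all reduce to the base "foo",
// so the parser can tell whether a float series still belongs to
// the classic histogram it is currently collating.
func baseName(metricName string) string {
    for _, suffix := range []string{"_bucket", "_count", "_sum"} {
        if s, ok := strings.CutSuffix(metricName, suffix); ok {
            return s
        }
    }
    return metricName
}

func main() {
    fmt.Println(baseName("test_histogram_1_bucket")) // test_histogram_1
    fmt.Println(baseName("test_metric_1"))           // test_metric_1
}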