diff --git a/config/config.go b/config/config.go
index 962a0f4a7..657c4fc75 100644
--- a/config/config.go
+++ b/config/config.go
@@ -656,7 +656,7 @@ type ScrapeConfig struct {
 	// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
 	AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
 	// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
-	ConvertClassicHistograms bool `yaml:"convert_classic_histograms,omitempty"`
+	ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
 	// File to which scrape failures are logged.
 	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
 	// The HTTP resource path on which to fetch metrics from targets.
diff --git a/scrape/manager.go b/scrape/manager.go
index 9791db0e8..f3dad2a04 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -178,7 +178,7 @@ func (m *Manager) reload() {
 				m.logger.Error("error reloading target set", "err", "invalid config id:"+setName)
 				continue
 			}
-			if scrapeConfig.ConvertClassicHistograms && m.opts.EnableCreatedTimestampZeroIngestion {
+			if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion {
 				// TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137
 				m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137")
 				continue
diff --git a/scrape/scrape.go b/scrape/scrape.go
index 290855b3a..c252d57f6 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -113,7 +113,7 @@ type scrapeLoopOptions struct {
 	interval                 time.Duration
 	timeout                  time.Duration
 	alwaysScrapeClassicHist  bool
-	convertClassicHistograms bool
+	convertClassicHistToNHCB bool
 	validationScheme         model.ValidationScheme
 	fallbackScrapeProtocol   string
 
@@ -182,7 +182,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
 			opts.interval,
 			opts.timeout,
 			opts.alwaysScrapeClassicHist,
-			opts.convertClassicHistograms,
+			opts.convertClassicHistToNHCB,
 			options.EnableNativeHistogramsIngestion,
 			options.EnableCreatedTimestampZeroIngestion,
 			options.ExtraMetrics,
@@ -488,7 +488,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 		mrc                      = sp.config.MetricRelabelConfigs
 		fallbackScrapeProtocol   = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
 		alwaysScrapeClassicHist  = sp.config.AlwaysScrapeClassicHistograms
-		convertClassicHistograms = sp.config.ConvertClassicHistograms
+		convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB
 	)
 
 	validationScheme := model.UTF8Validation
@@ -530,7 +530,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 				interval:                 interval,
 				timeout:                  timeout,
 				alwaysScrapeClassicHist:  alwaysScrapeClassicHist,
-				convertClassicHistograms: convertClassicHistograms,
+				convertClassicHistToNHCB: convertClassicHistToNHCB,
 				validationScheme:         validationScheme,
 				fallbackScrapeProtocol:   fallbackScrapeProtocol,
 			})
@@ -894,7 +894,7 @@ type scrapeLoop struct {
 	interval                 time.Duration
 	timeout                  time.Duration
 	alwaysScrapeClassicHist  bool
-	convertClassicHistograms bool
+	convertClassicHistToNHCB bool
 	validationScheme         model.ValidationScheme
 	fallbackScrapeProtocol   string
 
@@ -1196,7 +1196,7 @@ func newScrapeLoop(ctx context.Context,
 	interval time.Duration,
 	timeout time.Duration,
 	alwaysScrapeClassicHist bool,
-	convertClassicHistograms bool,
+	convertClassicHistToNHCB bool,
 	enableNativeHistogramIngestion bool,
 	enableCTZeroIngestion bool,
 	reportExtraMetrics bool,
@@ -1252,7 +1252,7 @@ func newScrapeLoop(ctx context.Context,
 		interval:                       interval,
 		timeout:                        timeout,
 		alwaysScrapeClassicHist:        alwaysScrapeClassicHist,
-		convertClassicHistograms:       convertClassicHistograms,
+		convertClassicHistToNHCB:       convertClassicHistToNHCB,
 		enableNativeHistogramIngestion: enableNativeHistogramIngestion,
 		enableCTZeroIngestion:          enableCTZeroIngestion,
 		reportExtraMetrics:             reportExtraMetrics,
@@ -1563,7 +1563,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
 		)
 		return
 	}
-	if sl.convertClassicHistograms {
+	if sl.convertClassicHistToNHCB {
 		p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist)
 	}
 	if err != nil {
@@ -1751,6 +1751,9 @@ loop:
 			} else {
 				ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
 			}
+			if err != nil {
+				fmt.Printf("Error when appending histogram in scrape loop: %s\n", err)
+			}
 		} else {
 			ref, err = app.Append(ref, lset, t, val)
 		}
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index 6187119bf..9a70d7411 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -3478,7 +3478,7 @@ test_summary_count 199
 }
 
 // Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
-func TestConvertClassicHistograms(t *testing.T) {
+func TestConvertClassicHistogramsToNHCB(t *testing.T) {
 	genTestCounterText := func(name string, value int, withMetadata bool) string {
 		if withMetadata {
 			return fmt.Sprintf(`
@@ -3839,23 +3839,23 @@ metric: <
 	for metricsTextName, metricsText := range metricsTexts {
 		for name, tc := range map[string]struct {
 			alwaysScrapeClassicHistograms bool
-			convertClassicHistograms      bool
+			convertClassicHistToNHCB      bool
 		}{
 			"convert with scrape": {
 				alwaysScrapeClassicHistograms: true,
-				convertClassicHistograms:      true,
+				convertClassicHistToNHCB:      true,
 			},
 			"convert without scrape": {
 				alwaysScrapeClassicHistograms: false,
-				convertClassicHistograms:      true,
+				convertClassicHistToNHCB:      true,
 			},
 			"scrape without convert": {
 				alwaysScrapeClassicHistograms: true,
-				convertClassicHistograms:      false,
+				convertClassicHistToNHCB:      false,
 			},
 			"neither scrape nor convert": {
 				alwaysScrapeClassicHistograms: false,
-				convertClassicHistograms:      false,
+				convertClassicHistToNHCB:      false,
 			},
 		} {
 			var expectedClassicHistCount, expectedNativeHistCount int
@@ -3869,15 +3869,15 @@ metric: <
 				}
 			} else if metricsText.hasClassic {
 				switch {
-				case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms:
+				case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB:
 					expectedClassicHistCount = 1
 					expectedNativeHistCount = 1
 					expectCustomBuckets = true
-				case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms:
+				case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB:
 					expectedClassicHistCount = 0
 					expectedNativeHistCount = 1
 					expectCustomBuckets = true
-				case !tc.convertClassicHistograms:
+				case !tc.convertClassicHistToNHCB:
 					expectedClassicHistCount = 1
 					expectedNativeHistCount = 0
 				}
@@ -3888,13 +3888,13 @@ metric: <
 				defer simpleStorage.Close()
 
 				config := &config.ScrapeConfig{
-					JobName:                       "test",
-					SampleLimit:                   100,
-					Scheme:                        "http",
-					ScrapeInterval:                model.Duration(100 * time.Millisecond),
-					ScrapeTimeout:                 model.Duration(100 * time.Millisecond),
-					AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms,
-					ConvertClassicHistograms:      tc.convertClassicHistograms,
+					JobName:                        "test",
+					SampleLimit:                    100,
+					Scheme:                         "http",
+					ScrapeInterval:                 model.Duration(100 * time.Millisecond),
+					ScrapeTimeout:                  model.Duration(100 * time.Millisecond),
+					AlwaysScrapeClassicHistograms:  tc.alwaysScrapeClassicHistograms,
+					ConvertClassicHistogramsToNHCB: tc.convertClassicHistToNHCB,
 				}
 
 				scrapeCount := 0
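For context, a minimal, self-contained sketch of how the renamed YAML key is expected to look when a scrape configuration is unmarshalled. The scrapeConfigSubset struct, the sample YAML, and the use of gopkg.in/yaml.v2 are illustrative assumptions for this sketch only; the real field is config.ScrapeConfig.ConvertClassicHistogramsToNHCB as changed above.

// Minimal sketch: parse the renamed option from YAML.
// scrapeConfigSubset is a hypothetical stand-in for the relevant
// fields of config.ScrapeConfig; it is not part of this change.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type scrapeConfigSubset struct {
	JobName                        string `yaml:"job_name"`
	AlwaysScrapeClassicHistograms  bool   `yaml:"always_scrape_classic_histograms,omitempty"`
	ConvertClassicHistogramsToNHCB bool   `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
}

func main() {
	// Note the new key name: convert_classic_histograms_to_nhcb
	// (previously convert_classic_histograms).
	raw := []byte(`
job_name: test
convert_classic_histograms_to_nhcb: true
`)
	var cfg scrapeConfigSubset
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}

As the scrape/manager.go hunk shows, enabling this option together with created timestamp zero ingestion is rejected at reload time until https://github.com/prometheus/prometheus/issues/15137 is fixed.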