From efc43d0714210884de4e556fcf206930704d2b42 Mon Sep 17 00:00:00 2001
From: Bartlomiej Plotka
Date: Fri, 18 Oct 2024 09:32:15 +0200
Subject: [PATCH] s/scrape_classic_histograms/always_scrape_classic_histograms
 (3.0 breaking change) (#15178)

This is for readability, especially when converting to the nhcb option.

See discussion: https://cloud-native.slack.com/archives/C077Z4V13AM/p1729155873397889

Signed-off-by: bwplotka
---
 config/config.go                    | 18 +++++++++---------
 docs/configuration/configuration.md |  4 ++--
 docs/feature_flags.md               |  2 +-
 scrape/scrape.go                    | 16 ++++++++--------
 scrape/scrape_test.go               |  6 +++---
 5 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/config/config.go b/config/config.go
index 3eb6898d5..a88b0d32f 100644
--- a/config/config.go
+++ b/config/config.go
@@ -163,13 +163,13 @@ var (
     // DefaultScrapeConfig is the default scrape configuration.
     DefaultScrapeConfig = ScrapeConfig{
         // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
-        ScrapeClassicHistograms: false,
-        MetricsPath:             "/metrics",
-        Scheme:                  "http",
-        HonorLabels:             false,
-        HonorTimestamps:         true,
-        HTTPClientConfig:        config.DefaultHTTPClientConfig,
-        EnableCompression:       true,
+        AlwaysScrapeClassicHistograms: false,
+        MetricsPath:                   "/metrics",
+        Scheme:                        "http",
+        HonorLabels:                   false,
+        HonorTimestamps:               true,
+        HTTPClientConfig:              config.DefaultHTTPClientConfig,
+        EnableCompression:             true,
     }
 
     // DefaultAlertmanagerConfig is the default alertmanager configuration.
@@ -631,8 +631,8 @@ type ScrapeConfig struct {
     // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
     // OpenMetricsText1.0.0, PrometheusText0.0.4.
     ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
-    // Whether to scrape a classic histogram that is also exposed as a native histogram.
-    ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
+    // Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
+    AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
     // File to which scrape failures are logged.
     ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
     // The HTTP resource path on which to fetch metrics from targets.
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index 6e0cf431c..4a681c797 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -215,9 +215,9 @@ job_name: <job_name>
 # OpenMetricsText1.0.0, PrometheusText0.0.4.
 [ scrape_protocols: [<string>, ...] | default = <global_config.scrape_protocols> ]
 
-# Whether to scrape a classic histogram that is also exposed as a native
+# Whether to scrape a classic histogram, even if it is also exposed as a native
 # histogram (has no effect without --enable-feature=native-histograms).
-[ scrape_classic_histograms: <boolean> | default = false ]
+[ always_scrape_classic_histograms: <boolean> | default = false ]
 
 # The HTTP resource path on which to fetch metrics from targets.
 [ metrics_path: <path> | default = /metrics ]
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index 65eb60eaf..0d6e23972 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -84,7 +84,7 @@ those classic histograms that do not come with a corresponding native
 histogram. However, if a native histogram is present, Prometheus will ignore
 the corresponding classic histogram, with the notable exception of exemplars,
 which are always ingested. To keep the classic histograms as well, enable
-`scrape_classic_histograms` in the scrape job.
+`always_scrape_classic_histograms` in the scrape job.
 
 _Note about the format of `le` and `quantile` label values:_
 
diff --git a/scrape/scrape.go b/scrape/scrape.go
index d40e0be2e..4273f4cb6 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -112,7 +112,7 @@ type scrapeLoopOptions struct {
     trackTimestampsStaleness bool
     interval                 time.Duration
     timeout                  time.Duration
-    scrapeClassicHistograms  bool
+    alwaysScrapeClassicHist  bool
     validationScheme         model.ValidationScheme
 
     mrc []*relabel.Config
@@ -179,7 +179,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
             opts.labelLimits,
             opts.interval,
             opts.timeout,
-            opts.scrapeClassicHistograms,
+            opts.alwaysScrapeClassicHist,
             options.EnableNativeHistogramsIngestion,
             options.EnableCreatedTimestampZeroIngestion,
             options.ExtraMetrics,
@@ -480,7 +480,7 @@ func (sp *scrapePool) sync(targets []*Target) {
         enableCompression        = sp.config.EnableCompression
         trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
         mrc                      = sp.config.MetricRelabelConfigs
-        scrapeClassicHistograms  = sp.config.ScrapeClassicHistograms
+        alwaysScrapeClassicHist  = sp.config.AlwaysScrapeClassicHistograms
     )
 
     validationScheme := model.UTF8Validation
@@ -521,7 +521,7 @@ func (sp *scrapePool) sync(targets []*Target) {
                 mrc:                      mrc,
                 interval:                 interval,
                 timeout:                  timeout,
-                scrapeClassicHistograms:  scrapeClassicHistograms,
+                alwaysScrapeClassicHist:  alwaysScrapeClassicHist,
                 validationScheme:         validationScheme,
             })
             if err != nil {
@@ -883,7 +883,7 @@ type scrapeLoop struct {
     labelLimits              *labelLimits
     interval                 time.Duration
     timeout                  time.Duration
-    scrapeClassicHistograms  bool
+    alwaysScrapeClassicHist  bool
     validationScheme         model.ValidationScheme
 
     // Feature flagged options.
@@ -1183,7 +1183,7 @@ func newScrapeLoop(ctx context.Context,
     labelLimits *labelLimits,
     interval time.Duration,
     timeout time.Duration,
-    scrapeClassicHistograms bool,
+    alwaysScrapeClassicHist bool,
     enableNativeHistogramIngestion bool,
     enableCTZeroIngestion bool,
     reportExtraMetrics bool,
@@ -1237,7 +1237,7 @@ func newScrapeLoop(ctx context.Context,
         labelLimits:                    labelLimits,
         interval:                       interval,
         timeout:                        timeout,
-        scrapeClassicHistograms:        scrapeClassicHistograms,
+        alwaysScrapeClassicHist:        alwaysScrapeClassicHist,
         enableNativeHistogramIngestion: enableNativeHistogramIngestion,
         enableCTZeroIngestion:          enableCTZeroIngestion,
         reportExtraMetrics:             reportExtraMetrics,
@@ -1537,7 +1537,7 @@ type appendErrors struct {
 }
 
 func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
-    p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.enableCTZeroIngestion, sl.symbolTable)
+    p, err := textparse.New(b, contentType, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable)
     if err != nil {
         sl.l.Debug(
             "Invalid content type on scrape, using prometheus parser as fallback.",
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index e0e094da5..f65d41a84 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -1846,7 +1846,7 @@ func TestScrapeLoopAppendStalenessIfTrackTimestampStaleness(t *testing.T) {
 func TestScrapeLoopAppendExemplar(t *testing.T) {
     tests := []struct {
         title                           string
-        scrapeClassicHistograms         bool
+        alwaysScrapeClassicHist         bool
         enableNativeHistogramsIngestion bool
         scrapeText                      string
         contentType                     string
@@ -2115,7 +2115,7 @@ metric: <
 >
 
 `,
-            scrapeClassicHistograms: true,
+            alwaysScrapeClassicHist: true,
             contentType:             "application/vnd.google.protobuf",
             floats: []floatSample{
                {metric: labels.FromStrings("__name__", "test_histogram_count"), t: 1234568, f: 175},
@@ -2177,7 +2177,7 @@ metric: <
             sl.reportSampleMutator = func(l labels.Labels) labels.Labels {
                 return mutateReportSampleLabels(l, discoveryLabels)
             }
-            sl.scrapeClassicHistograms = test.scrapeClassicHistograms
+            sl.alwaysScrapeClassicHist = test.alwaysScrapeClassicHist
 
             now := time.Now()
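Usage note (not part of the patch): a minimal scrape_config sketch using the renamed setting. The job name and target below are illustrative only, and per the docs change above the option has no effect without --enable-feature=native-histograms.

```yaml
scrape_configs:
  - job_name: "example"  # hypothetical job name, not from the patch
    # Keep ingesting the classic series even when the target also exposes a
    # native histogram; renamed from scrape_classic_histograms in 3.0.
    always_scrape_classic_histograms: true
    static_configs:
      - targets: ["localhost:9090"]  # illustrative target
```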