
Rename convert_classic_histograms to convert_classic_histograms_to_nhcb

On reviewer request.

Signed-off-by: György Krajcsovits <gyorgy.krajcsovits@grafana.com>
Branch: pull/14978/head
Author: György Krajcsovits, 1 month ago
Commit: 4283ae73dc
Changed files:
  1. config/config.go (2 changed lines)
  2. scrape/manager.go (2 changed lines)
  3. scrape/scrape.go (19 changed lines)
  4. scrape/scrape_test.go (32 changed lines)

config/config.go

@@ -656,7 +656,7 @@ type ScrapeConfig struct {
     // Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
     AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
     // Whether to convert all scraped classic histograms into a native histogram with custom buckets.
-    ConvertClassicHistograms bool `yaml:"convert_classic_histograms,omitempty"`
+    ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
     // File to which scrape failures are logged.
     ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
     // The HTTP resource path on which to fetch metrics from targets.
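For reference, this is roughly how the renamed setting would appear in a scrape configuration. It is a minimal sketch: only the two histogram-related keys come from the yaml struct tags above; the job name and the chosen values are illustrative assumptions.

    scrape_configs:
      - job_name: "example"  # illustrative job name
        # Before this commit the key was spelled convert_classic_histograms.
        convert_classic_histograms_to_nhcb: true
        # Set to true to also keep the scraped classic series alongside the converted NHCB.
        always_scrape_classic_histograms: false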

scrape/manager.go

@@ -178,7 +178,7 @@ func (m *Manager) reload() {
                 m.logger.Error("error reloading target set", "err", "invalid config id:"+setName)
                 continue
             }
-            if scrapeConfig.ConvertClassicHistograms && m.opts.EnableCreatedTimestampZeroIngestion {
+            if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion {
                 // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137
                 m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137")
                 continue
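As additional context for the guard above, a scrape job like the following sketch would be rejected at reload time when Prometheus is also started with the created-timestamp-zero-ingestion feature enabled. The job name is an illustrative assumption; the constraint itself comes from the error message and the linked issue 15137.

    scrape_configs:
      - job_name: "nhcb-convert"  # illustrative name
        # Rejected when combined with --enable-feature=created-timestamp-zero-ingestion,
        # see https://github.com/prometheus/prometheus/issues/15137.
        convert_classic_histograms_to_nhcb: true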

scrape/scrape.go

@@ -113,7 +113,7 @@ type scrapeLoopOptions struct {
     interval time.Duration
     timeout time.Duration
     alwaysScrapeClassicHist bool
-    convertClassicHistograms bool
+    convertClassicHistToNHCB bool
     validationScheme model.ValidationScheme
     fallbackScrapeProtocol string
@@ -182,7 +182,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
             opts.interval,
             opts.timeout,
             opts.alwaysScrapeClassicHist,
-            opts.convertClassicHistograms,
+            opts.convertClassicHistToNHCB,
             options.EnableNativeHistogramsIngestion,
             options.EnableCreatedTimestampZeroIngestion,
             options.ExtraMetrics,
@@ -488,7 +488,7 @@ func (sp *scrapePool) sync(targets []*Target) {
         mrc = sp.config.MetricRelabelConfigs
         fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType()
         alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms
-        convertClassicHistograms = sp.config.ConvertClassicHistograms
+        convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB
     )
     validationScheme := model.UTF8Validation
@@ -530,7 +530,7 @@ func (sp *scrapePool) sync(targets []*Target) {
                 interval: interval,
                 timeout: timeout,
                 alwaysScrapeClassicHist: alwaysScrapeClassicHist,
-                convertClassicHistograms: convertClassicHistograms,
+                convertClassicHistToNHCB: convertClassicHistToNHCB,
                 validationScheme: validationScheme,
                 fallbackScrapeProtocol: fallbackScrapeProtocol,
             })
@@ -894,7 +894,7 @@ type scrapeLoop struct {
     interval time.Duration
     timeout time.Duration
     alwaysScrapeClassicHist bool
-    convertClassicHistograms bool
+    convertClassicHistToNHCB bool
     validationScheme model.ValidationScheme
     fallbackScrapeProtocol string
@@ -1196,7 +1196,7 @@ func newScrapeLoop(ctx context.Context,
     interval time.Duration,
     timeout time.Duration,
     alwaysScrapeClassicHist bool,
-    convertClassicHistograms bool,
+    convertClassicHistToNHCB bool,
     enableNativeHistogramIngestion bool,
     enableCTZeroIngestion bool,
     reportExtraMetrics bool,
@@ -1252,7 +1252,7 @@ func newScrapeLoop(ctx context.Context,
         interval: interval,
         timeout: timeout,
         alwaysScrapeClassicHist: alwaysScrapeClassicHist,
-        convertClassicHistograms: convertClassicHistograms,
+        convertClassicHistToNHCB: convertClassicHistToNHCB,
         enableNativeHistogramIngestion: enableNativeHistogramIngestion,
         enableCTZeroIngestion: enableCTZeroIngestion,
         reportExtraMetrics: reportExtraMetrics,
@@ -1563,7 +1563,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
         )
         return
     }
-    if sl.convertClassicHistograms {
+    if sl.convertClassicHistToNHCB {
         p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist)
     }
     if err != nil {
@@ -1751,6 +1751,9 @@ loop:
             } else {
                 ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
             }
+            if err != nil {
+                fmt.Printf("Error when appending histogram in scrape loop: %s\n", err)
+            }
         } else {
             ref, err = app.Append(ref, lset, t, val)
         }

scrape/scrape_test.go

@@ -3478,7 +3478,7 @@ test_summary_count 199
 }
 // Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
-func TestConvertClassicHistograms(t *testing.T) {
+func TestConvertClassicHistogramsToNHCB(t *testing.T) {
     genTestCounterText := func(name string, value int, withMetadata bool) string {
         if withMetadata {
             return fmt.Sprintf(`
@@ -3839,23 +3839,23 @@ metric: <
     for metricsTextName, metricsText := range metricsTexts {
         for name, tc := range map[string]struct {
             alwaysScrapeClassicHistograms bool
-            convertClassicHistograms bool
+            convertClassicHistToNHCB bool
         }{
             "convert with scrape": {
                 alwaysScrapeClassicHistograms: true,
-                convertClassicHistograms: true,
+                convertClassicHistToNHCB: true,
             },
             "convert without scrape": {
                 alwaysScrapeClassicHistograms: false,
-                convertClassicHistograms: true,
+                convertClassicHistToNHCB: true,
             },
             "scrape without convert": {
                 alwaysScrapeClassicHistograms: true,
-                convertClassicHistograms: false,
+                convertClassicHistToNHCB: false,
             },
             "neither scrape nor convert": {
                 alwaysScrapeClassicHistograms: false,
-                convertClassicHistograms: false,
+                convertClassicHistToNHCB: false,
             },
         } {
             var expectedClassicHistCount, expectedNativeHistCount int
@@ -3869,15 +3869,15 @@ metric: <
                 }
             } else if metricsText.hasClassic {
                 switch {
-                case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms:
+                case tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB:
                     expectedClassicHistCount = 1
                     expectedNativeHistCount = 1
                     expectCustomBuckets = true
-                case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistograms:
+                case !tc.alwaysScrapeClassicHistograms && tc.convertClassicHistToNHCB:
                     expectedClassicHistCount = 0
                     expectedNativeHistCount = 1
                     expectCustomBuckets = true
-                case !tc.convertClassicHistograms:
+                case !tc.convertClassicHistToNHCB:
                     expectedClassicHistCount = 1
                     expectedNativeHistCount = 0
                 }
@@ -3888,13 +3888,13 @@ metric: <
             defer simpleStorage.Close()
             config := &config.ScrapeConfig{
-                JobName:                       "test",
-                SampleLimit:                   100,
-                Scheme:                        "http",
-                ScrapeInterval:                model.Duration(100 * time.Millisecond),
-                ScrapeTimeout:                 model.Duration(100 * time.Millisecond),
-                AlwaysScrapeClassicHistograms: tc.alwaysScrapeClassicHistograms,
-                ConvertClassicHistograms:      tc.convertClassicHistograms,
+                JobName:                        "test",
+                SampleLimit:                    100,
+                Scheme:                         "http",
+                ScrapeInterval:                 model.Duration(100 * time.Millisecond),
+                ScrapeTimeout:                  model.Duration(100 * time.Millisecond),
+                AlwaysScrapeClassicHistograms:  tc.alwaysScrapeClassicHistograms,
+                ConvertClassicHistogramsToNHCB: tc.convertClassicHistToNHCB,
             }
             scrapeCount := 0
