@@ -134,6 +134,12 @@ var (
 		},
 		[]string{"scrape_job"},
 	)
+	targetScrapeExceededBodySizeLimit = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_exceeded_body_size_limit_total",
+			Help: "Total number of scrapes that hit the body size limit",
+		},
+	)
 	targetScrapeSampleLimit = prometheus.NewCounter(
 		prometheus.CounterOpts{
 			Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
@@ -195,6 +201,7 @@ func init() {
 		targetScrapePoolReloadsFailed,
 		targetSyncIntervalLength,
 		targetScrapePoolSyncsCounter,
+		targetScrapeExceededBodySizeLimit,
 		targetScrapeSampleLimit,
 		targetScrapeSampleDuplicate,
 		targetScrapeSampleOutOfOrder,
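
These first two hunks follow the standard client_golang counter lifecycle: declare the counter at package level, register it once in `init()`, and `Inc()` it at the point of failure (the `scrape()` hunks further down). A self-contained sketch of that pattern, using hypothetical names:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Hypothetical counter, standing in for targetScrapeExceededBodySizeLimit.
var demoCounter = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "demo_events_total",
	Help: "Total number of demo events.",
})

func main() {
	// Register exactly once; MustRegister panics on duplicate
	// registration, which is why the real code does this in init().
	prometheus.MustRegister(demoCounter)
	demoCounter.Inc() // the scrape() hunks below do this on a limit hit
	fmt.Println("counter registered and incremented")
}
```
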
@@ -381,11 +388,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 	targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
 
 	var (
-		wg          sync.WaitGroup
-		interval    = time.Duration(sp.config.ScrapeInterval)
-		timeout     = time.Duration(sp.config.ScrapeTimeout)
-		sampleLimit = int(sp.config.SampleLimit)
-		labelLimits = &labelLimits{
+		wg            sync.WaitGroup
+		interval      = time.Duration(sp.config.ScrapeInterval)
+		timeout       = time.Duration(sp.config.ScrapeTimeout)
+		bodySizeLimit = int64(sp.config.BodySizeLimit)
+		sampleLimit   = int(sp.config.SampleLimit)
+		labelLimits   = &labelLimits{
 			labelLimit:            int(sp.config.LabelLimit),
 			labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
 			labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
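
The only functional addition in this hunk is the `bodySizeLimit` binding; the neighboring lines change only because gofmt realigns the `var` block around the new, longer name. The `int64` conversion suggests `sp.config.BodySizeLimit` is a byte-count type such as `units.Base2Bytes` from github.com/alecthomas/units (an assumption here, since the config change is not part of this file, but that is the package Prometheus's config layer uses for size fields). A minimal sketch of parsing and converting such a value:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// "10MiB" is the kind of value the new scrape_config option would
	// carry in YAML; the config side is not shown in this diff.
	limit, err := units.ParseBase2Bytes("10MiB")
	if err != nil {
		panic(err)
	}
	// Base2Bytes is an int64 byte count, so the conversion is lossless.
	fmt.Println(int64(limit)) // 10485760
}
```
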
@@ -408,7 +416,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 		}
 		var (
 			t       = sp.activeTargets[fp]
-			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout}
+			s       = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
 			newLoop = sp.newLoop(scrapeLoopOptions{
 				target:          t,
 				scraper:         s,
@@ -481,11 +489,12 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
 // It returns after all stopped scrape loops terminated.
 func (sp *scrapePool) sync(targets []*Target) {
 	var (
-		uniqueLoops = make(map[uint64]loop)
-		interval    = time.Duration(sp.config.ScrapeInterval)
-		timeout     = time.Duration(sp.config.ScrapeTimeout)
-		sampleLimit = int(sp.config.SampleLimit)
-		labelLimits = &labelLimits{
+		uniqueLoops   = make(map[uint64]loop)
+		interval      = time.Duration(sp.config.ScrapeInterval)
+		timeout       = time.Duration(sp.config.ScrapeTimeout)
+		bodySizeLimit = int64(sp.config.BodySizeLimit)
+		sampleLimit   = int(sp.config.SampleLimit)
+		labelLimits   = &labelLimits{
 			labelLimit:            int(sp.config.LabelLimit),
 			labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
 			labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
@@ -500,7 +509,7 @@ func (sp *scrapePool) sync(targets []*Target) {
 		hash := t.hash()
 
 		if _, ok := sp.activeTargets[hash]; !ok {
-			s := &targetScraper{Target: t, client: sp.client, timeout: timeout}
+			s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit}
 			l := sp.newLoop(scrapeLoopOptions{
 				target:          t,
 				scraper:         s,
@@ -690,8 +699,12 @@ type targetScraper struct {
 
 	gzipr *gzip.Reader
 	buf   *bufio.Reader
+
+	bodySizeLimit int64
 }
 
+var errBodySizeLimit = errors.New("body size limit exceeded")
+
 const acceptHeader = `application/openmetrics-text; version=0.0.1,text/plain;version=0.0.4;q=0.5,*/*;q=0.1`
 
 var userAgentHeader = fmt.Sprintf("Prometheus/%s", version.Version)
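errBodySizeLimit is a package-level sentinel, so callers of `scrape()` can distinguish a limit hit from ordinary transport errors without string matching. A standalone sketch of the pattern (standard-library `errors` here; the file itself aliases github.com/pkg/errors, whose `errors.New` values compare the same way):

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel mirroring the one added in the hunk above.
var errBodySizeLimit = errors.New("body size limit exceeded")

// Hypothetical stand-in for the real scrape path.
func scrape(bodyLen, limit int64) error {
	if bodyLen >= limit {
		return errBodySizeLimit
	}
	return nil
}

func main() {
	err := scrape(2048, 1024)
	// Callers can special-case this failure mode explicitly.
	fmt.Println(errors.Is(err, errBodySizeLimit)) // true
}
```
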
@@ -723,11 +736,18 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
 		return "", errors.Errorf("server returned HTTP status %s", resp.Status)
 	}
 
+	if s.bodySizeLimit <= 0 {
+		s.bodySizeLimit = math.MaxInt64
+	}
 	if resp.Header.Get("Content-Encoding") != "gzip" {
-		_, err = io.Copy(w, resp.Body)
+		n, err := io.Copy(w, io.LimitReader(resp.Body, s.bodySizeLimit))
 		if err != nil {
 			return "", err
 		}
+		if n >= s.bodySizeLimit {
+			targetScrapeExceededBodySizeLimit.Inc()
+			return "", errBodySizeLimit
+		}
 		return resp.Header.Get("Content-Type"), nil
 	}
 
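This is the core technique: wrap the body in `io.LimitReader` and treat a copy of `bodySizeLimit`-or-more bytes as a violation. Since `io.LimitReader(r, n)` yields at most n bytes, a count of exactly n cannot be distinguished from a larger body that got truncated, so the `>=` deliberately rejects both rather than risk ingesting a silently truncated payload; the cost is a false positive when a body is exactly `bodySizeLimit` bytes. A runnable sketch of that behavior:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	const limit = 10 // hypothetical 10-byte body size limit
	body := strings.NewReader("this response body is longer than ten bytes")

	// LimitReader stops the copy at the limit; Copy returns EOF-free.
	n, err := io.Copy(io.Discard, io.LimitReader(body, limit))
	if err != nil {
		panic(err)
	}
	fmt.Println(n)          // 10: the copy stopped at the limit
	fmt.Println(n >= limit) // true: treated as a body size limit hit
}
```
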
@@ -744,11 +764,15 @@ func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error)
 		}
 	}
 
-	_, err = io.Copy(w, s.gzipr)
+	n, err := io.Copy(w, io.LimitReader(s.gzipr, s.bodySizeLimit))
 	s.gzipr.Close()
 	if err != nil {
 		return "", err
 	}
+	if n >= s.bodySizeLimit {
+		targetScrapeExceededBodySizeLimit.Inc()
+		return "", errBodySizeLimit
+	}
 	return resp.Header.Get("Content-Type"), nil
 }
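
The same check on the gzip path, with one important property: the `LimitReader` wraps the decompressed stream (`s.gzipr`), so the limit bounds what Prometheus actually buffers and parses, not the bytes on the wire. A small compressed response that inflates enormously is still cut off at the limit. A sketch demonstrating this, assuming nothing beyond the standard library:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// 1 MiB of zeros compresses to roughly a kilobyte.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	if _, err := zw.Write(make([]byte, 1<<20)); err != nil {
		panic(err)
	}
	zw.Close()
	fmt.Println("compressed size:", compressed.Len())

	const limit = 64 << 10 // hypothetical 64 KiB body size limit
	zr, err := gzip.NewReader(&compressed)
	if err != nil {
		panic(err)
	}
	// The limit applies to decompressed bytes, mirroring the hunk above.
	n, err := io.Copy(io.Discard, io.LimitReader(zr, limit))
	zr.Close()
	if err != nil {
		panic(err)
	}
	fmt.Println(n, n >= limit) // 65536 true: rejected as a limit hit
}
```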