@@ -95,18 +95,19 @@ type labelLimits struct {
 }
 
 type scrapeLoopOptions struct {
 	target                   *Target
 	scraper                  scraper
 	sampleLimit              int
 	bucketLimit              int
 	labelLimits              *labelLimits
 	honorLabels              bool
 	honorTimestamps          bool
+	trackTimestampsStaleness bool
 	interval                 time.Duration
 	timeout                  time.Duration
 	scrapeClassicHistograms  bool
 	mrc                      []*relabel.Config
 	cache                    *scrapeCache
 }
 
 const maxAheadTime = 10 * time.Minute
@@ -160,6 +161,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
 			cache,
 			offsetSeed,
 			opts.honorTimestamps,
+			opts.trackTimestampsStaleness,
 			opts.sampleLimit,
 			opts.bucketLimit,
 			opts.labelLimits,
@@ -270,9 +272,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 			labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
 			labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
 		}
 		honorLabels              = sp.config.HonorLabels
 		honorTimestamps          = sp.config.HonorTimestamps
+		trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
 		mrc                      = sp.config.MetricRelabelConfigs
 	)
 
 	sp.targetMtx.Lock()
@@ -298,17 +301,18 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
 				acceptHeader:  acceptHeader(cfg.ScrapeProtocols),
 			}
 			newLoop = sp.newLoop(scrapeLoopOptions{
 				target:                   t,
 				scraper:                  s,
 				sampleLimit:              sampleLimit,
 				bucketLimit:              bucketLimit,
 				labelLimits:              labelLimits,
 				honorLabels:              honorLabels,
 				honorTimestamps:          honorTimestamps,
+				trackTimestampsStaleness: trackTimestampsStaleness,
 				mrc:                      mrc,
 				cache:                    cache,
 				interval:                 interval,
 				timeout:                  timeout,
 			})
 		)
 		if err != nil {
@@ -396,10 +400,11 @@ func (sp *scrapePool) sync(targets []*Target) {
 			labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
 			labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
 		}
 		honorLabels              = sp.config.HonorLabels
 		honorTimestamps          = sp.config.HonorTimestamps
+		trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
 		mrc                      = sp.config.MetricRelabelConfigs
 		scrapeClassicHistograms  = sp.config.ScrapeClassicHistograms
 	)
 
 	sp.targetMtx.Lock()
@@ -421,17 +426,18 @@ func (sp *scrapePool) sync(targets []*Target) {
 				metrics:       sp.metrics,
 			}
 			l := sp.newLoop(scrapeLoopOptions{
 				target:                   t,
 				scraper:                  s,
 				sampleLimit:              sampleLimit,
 				bucketLimit:              bucketLimit,
 				labelLimits:              labelLimits,
 				honorLabels:              honorLabels,
 				honorTimestamps:          honorTimestamps,
+				trackTimestampsStaleness: trackTimestampsStaleness,
 				mrc:                      mrc,
 				interval:                 interval,
 				timeout:                  timeout,
 				scrapeClassicHistograms:  scrapeClassicHistograms,
 			})
 			if err != nil {
 				l.setForcedError(err)
@@ -750,21 +756,22 @@ type cacheEntry struct {
 }
 
 type scrapeLoop struct {
 	scraper                  scraper
 	l                        log.Logger
 	cache                    *scrapeCache
 	lastScrapeSize           int
 	buffers                  *pool.Pool
 	offsetSeed               uint64
 	honorTimestamps          bool
+	trackTimestampsStaleness bool
 	forcedErr                error
 	forcedErrMtx             sync.Mutex
 	sampleLimit              int
 	bucketLimit              int
 	labelLimits              *labelLimits
 	interval                 time.Duration
 	timeout                  time.Duration
 	scrapeClassicHistograms  bool
 
 	appender            func(ctx context.Context) storage.Appender
 	sampleMutator       labelsMutator
@@ -1046,6 +1053,7 @@ func newScrapeLoop(ctx context.Context,
 	cache *scrapeCache,
 	offsetSeed uint64,
 	honorTimestamps bool,
+	trackTimestampsStaleness bool,
 	sampleLimit int,
 	bucketLimit int,
 	labelLimits *labelLimits,
@@ -1080,27 +1088,28 @@ func newScrapeLoop(ctx context.Context,
 	}
 
 	sl := &scrapeLoop{
 		scraper:                  sc,
 		buffers:                  buffers,
 		cache:                    cache,
 		appender:                 appender,
 		sampleMutator:            sampleMutator,
 		reportSampleMutator:      reportSampleMutator,
 		stopped:                  make(chan struct{}),
 		offsetSeed:               offsetSeed,
 		l:                        l,
 		parentCtx:                ctx,
 		appenderCtx:              appenderCtx,
 		honorTimestamps:          honorTimestamps,
+		trackTimestampsStaleness: trackTimestampsStaleness,
 		sampleLimit:              sampleLimit,
 		bucketLimit:              bucketLimit,
 		labelLimits:              labelLimits,
 		interval:                 interval,
 		timeout:                  timeout,
 		scrapeClassicHistograms:  scrapeClassicHistograms,
 		reportExtraMetrics:       reportExtraMetrics,
 		appendMetadataToWAL:      appendMetadataToWAL,
 		metrics:                  metrics,
 	}
 	sl.ctx, sl.cancel = context.WithCancel(ctx)
 
@@ -1547,7 +1556,7 @@ loop:
 		}
 
 		if !ok {
-			if parsedTimestamp == nil {
+			if parsedTimestamp == nil || sl.trackTimestampsStaleness {
 				// Bypass staleness logic if there is an explicit timestamp.
 				sl.cache.trackStaleness(hash, lset)
 			}
@@ -1628,7 +1637,7 @@ loop:
 func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
 	switch errors.Cause(err) {
 	case nil:
-		if tp == nil && ce != nil {
+		if (tp == nil || sl.trackTimestampsStaleness) && ce != nil {
 			sl.cache.trackStaleness(ce.hash, ce.lset)
 		}
 		return true, nil
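
Not part of the patch: a minimal Go sketch of the condition the last two hunks change. With the new TrackTimestampsStaleness option off, samples carrying an explicit timestamp bypass staleness tracking; with it on, they are tracked like untimestamped samples. The helper name shouldTrackStaleness is hypothetical; in the patch the check is inlined as parsedTimestamp == nil || sl.trackTimestampsStaleness.

package main

import "fmt"

// shouldTrackStaleness mirrors the patched condition: a scraped series is
// registered for staleness tracking when its sample has no explicit
// timestamp, or when the scrape config enables TrackTimestampsStaleness.
func shouldTrackStaleness(hasExplicitTimestamp, trackTimestampsStaleness bool) bool {
	return !hasExplicitTimestamp || trackTimestampsStaleness
}

func main() {
	fmt.Println(shouldTrackStaleness(true, false))  // false: explicit timestamp, option disabled (old behaviour)
	fmt.Println(shouldTrackStaleness(true, true))   // true: option forces staleness markers for timestamped samples
	fmt.Println(shouldTrackStaleness(false, false)) // true: no timestamp, staleness tracked as before
}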