// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scrape

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/klauspost/compress/gzip"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/version"
	"golang.org/x/exp/slices"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/discovery/targetgroup"
	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/metadata"
	"github.com/prometheus/prometheus/model/relabel"
	"github.com/prometheus/prometheus/model/textparse"
	"github.com/prometheus/prometheus/model/timestamp"
	"github.com/prometheus/prometheus/model/value"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/util/pool"
)

// ScrapeTimestampTolerance is the tolerance for aligning scrape append
// timestamps, to enable better compression at the TSDB level.
// See https://github.com/prometheus/prometheus/issues/7846
var ScrapeTimestampTolerance = 2 * time.Millisecond

// AlignScrapeTimestamps enables the scrape timestamp alignment tolerance described above.
var AlignScrapeTimestamps = true

var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName)

// scrapePool manages scrapes for sets of targets.
type scrapePool struct {
	appendable storage.Appendable
	logger     log.Logger
	cancel     context.CancelFunc
	httpOpts   []config_util.HTTPClientOption

	// mtx must not be taken after targetMtx.
	mtx    sync.Mutex
	config *config.ScrapeConfig
	client *http.Client
	loops  map[uint64]loop

	targetMtx sync.Mutex
	// activeTargets and loops must always be synchronized to have the same
	// set of hashes.
	activeTargets       map[uint64]*Target
	droppedTargets      []*Target // Subject to KeepDroppedTargets limit.
	droppedTargetsCount int       // Count of all dropped targets.

	// Constructor for new scrape loops. This is settable for testing convenience.
	newLoop func(scrapeLoopOptions) loop

	noDefaultPort bool

	metrics *scrapeMetrics
}

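// labelLimits holds the per-scrape limits on the number of labels and on
// label name and value lengths.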
type labelLimits struct {
	labelLimit            int
	labelNameLengthLimit  int
	labelValueLengthLimit int
}

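// scrapeLoopOptions bundles the per-target parameters passed to
// scrapePool.newLoop when constructing a scrape loop.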
type scrapeLoopOptions struct {
	target                   *Target
	scraper                  scraper
	sampleLimit              int
	bucketLimit              int
	maxSchema                int32
	labelLimits              *labelLimits
	honorLabels              bool
	honorTimestamps          bool
	trackTimestampsStaleness bool
	interval                 time.Duration
	timeout                  time.Duration
	scrapeClassicHistograms  bool

	mrc               []*relabel.Config
	cache             *scrapeCache
	enableCompression bool
}

const maxAheadTime = 10 * time.Minute

// A labelsMutator rewrites the label set of a sample;
// returning an empty label set is interpreted as "drop".
type labelsMutator func(labels.Labels) labels.Labels

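// newScrapePool creates a scrape pool for the given scrape configuration,
// backed by the provided Appendable, and wires up the constructor used for
// its per-target scrape loops.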
func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}

	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...)
	if err != nil {
		return nil, fmt.Errorf("error creating HTTP client: %w", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	sp := &scrapePool{
		cancel:        cancel,
		appendable:    app,
		config:        cfg,
		client:        client,
		activeTargets: map[uint64]*Target{},
		loops:         map[uint64]loop{},
		logger:        logger,
		metrics:       metrics,
		httpOpts:      options.HTTPClientOptions,
		noDefaultPort: options.NoDefaultPort,
	}
	sp.newLoop = func(opts scrapeLoopOptions) loop {
		// Update the targets retrieval function for metadata to a new scrape cache.
		cache := opts.cache
		if cache == nil {
			cache = newScrapeCache(metrics)
		}
		opts.target.SetMetadataStore(cache)

		return newScrapeLoop(
			ctx,
			opts.scraper,
			log.With(logger, "target", opts.target),
			buffers,
			func(l labels.Labels) labels.Labels {
				return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc)
			},
			func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
			func(ctx context.Context) storage.Appender { return app.Appender(ctx) },
			cache,
			offsetSeed,
			opts.honorTimestamps,
			opts.trackTimestampsStaleness,
			opts.enableCompression,
			opts.sampleLimit,
			opts.bucketLimit,
			opts.maxSchema,
			opts.labelLimits,
			opts.interval,
			opts.timeout,
			opts.scrapeClassicHistograms,
			options.EnableCreatedTimestampZeroIngestion,
			options.ExtraMetrics,
			options.EnableMetadataStorage,
			opts.target,
			options.PassMetadataInContext,
			metrics,
			options.skipOffsetting,
		)
	}
	sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
	return sp, nil
}

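// ActiveTargets returns the currently active targets of the scrape pool.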
func (sp *scrapePool) ActiveTargets() []*Target {
	sp.targetMtx.Lock()
	defer sp.targetMtx.Unlock()

	var tActive []*Target
	for _, t := range sp.activeTargets {
		tActive = append(tActive, t)
	}
	return tActive
}

// DroppedTargets returns the dropped targets, subject to the KeepDroppedTargets limit.
func (sp *scrapePool) DroppedTargets() []*Target {
	sp.targetMtx.Lock()
	defer sp.targetMtx.Unlock()
	return sp.droppedTargets
}

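// DroppedTargetsCount returns the number of all dropped targets, regardless
// of the KeepDroppedTargets limit.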
func (sp *scrapePool) DroppedTargetsCount() int {
	sp.targetMtx.Lock()
	defer sp.targetMtx.Unlock()
	return sp.droppedTargetsCount
}

// stop terminates all scrape loops and returns after they all terminated.
func (sp *scrapePool) stop() {
	sp.mtx.Lock()
	defer sp.mtx.Unlock()
	sp.cancel()
	var wg sync.WaitGroup

	sp.targetMtx.Lock()

	for fp, l := range sp.loops {
		wg.Add(1)

		go func(l loop) {
			l.stop()
			wg.Done()
		}(l)

		delete(sp.loops, fp)
		delete(sp.activeTargets, fp)
	}

	sp.targetMtx.Unlock()

	wg.Wait()
	sp.client.CloseIdleConnections()

	if sp.config != nil {
		sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
		sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
		sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
		sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
		sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
	}
}

// reload the scrape pool with the given scrape configuration. The target state is preserved
// but all scrape loops are restarted with the new scrape configuration.
// This method returns after all scrape loops that were stopped have stopped scraping.
func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
	sp.mtx.Lock()
	defer sp.mtx.Unlock()
	sp.metrics.targetScrapePoolReloads.Inc()
	start := time.Now()

	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...)
	if err != nil {
		sp.metrics.targetScrapePoolReloadsFailed.Inc()
		return fmt.Errorf("error creating HTTP client: %w", err)
	}

	reuseCache := reusableCache(sp.config, cfg)
	sp.config = cfg
	oldClient := sp.client
	sp.client = client

	sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))

	var (
		wg            sync.WaitGroup
		interval      = time.Duration(sp.config.ScrapeInterval)
		timeout       = time.Duration(sp.config.ScrapeTimeout)
		bodySizeLimit = int64(sp.config.BodySizeLimit)
		sampleLimit   = int(sp.config.SampleLimit)
		bucketLimit   = int(sp.config.NativeHistogramBucketLimit)
		maxSchema     = pickSchema(sp.config.NativeHistogramMinBucketFactor)
		labelLimits   = &labelLimits{
			labelLimit:            int(sp.config.LabelLimit),
			labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
			labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
		}
		honorLabels              = sp.config.HonorLabels
		honorTimestamps          = sp.config.HonorTimestamps
		enableCompression        = sp.config.EnableCompression
		trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
		mrc                      = sp.config.MetricRelabelConfigs
	)

	sp.targetMtx.Lock()

	forcedErr := sp.refreshTargetLimitErr()
	for fp, oldLoop := range sp.loops {
		var cache *scrapeCache
		if oc := oldLoop.getCache(); reuseCache && oc != nil {
			oldLoop.disableEndOfRunStalenessMarkers()
			cache = oc
		} else {
			cache = newScrapeCache(sp.metrics)
		}

		t := sp.activeTargets[fp]
		interval, timeout, err := t.intervalAndTimeout(interval, timeout)
		var (
			s = &targetScraper{
				Target:               t,
				client:               sp.client,
				timeout:              timeout,
				bodySizeLimit:        bodySizeLimit,
				acceptHeader:         acceptHeader(cfg.ScrapeProtocols),
				acceptEncodingHeader: acceptEncodingHeader(enableCompression),
			}
			newLoop = sp.newLoop(scrapeLoopOptions{
				target:                   t,
				scraper:                  s,
				sampleLimit:              sampleLimit,
				bucketLimit:              bucketLimit,
				maxSchema:                maxSchema,
				labelLimits:              labelLimits,
				honorLabels:              honorLabels,
				honorTimestamps:          honorTimestamps,
				enableCompression:        enableCompression,
				trackTimestampsStaleness: trackTimestampsStaleness,
				mrc:                      mrc,
				cache:                    cache,
				interval:                 interval,
				timeout:                  timeout,
			})
		)
		if err != nil {
			newLoop.setForcedError(err)
		}
		wg.Add(1)

		go func(oldLoop, newLoop loop) {
			oldLoop.stop()
			wg.Done()

			newLoop.setForcedError(forcedErr)
			newLoop.run(nil)
		}(oldLoop, newLoop)

		sp.loops[fp] = newLoop
	}

	sp.targetMtx.Unlock()

	wg.Wait()
	oldClient.CloseIdleConnections()
	sp.metrics.targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
		time.Since(start).Seconds(),
	)
	return nil
}

// Sync converts target groups into actual scrape targets and synchronizes
// the currently running scraper with the resulting set and returns all scraped and dropped targets.
func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
	sp.mtx.Lock()
	defer sp.mtx.Unlock()
	start := time.Now()

	sp.targetMtx.Lock()
	var all []*Target
	var targets []*Target
	lb := labels.NewBuilder(labels.EmptyLabels())
	sp.droppedTargets = []*Target{}
	sp.droppedTargetsCount = 0
	for _, tg := range tgs {
		targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb)
		for _, err := range failures {
			level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
		}
		sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures)))
		for _, t := range targets {
			// Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage.
			nonEmpty := false
			t.LabelsRange(func(l labels.Label) { nonEmpty = true })
			switch {
			case nonEmpty:
				all = append(all, t)
			case !t.discoveredLabels.IsEmpty():
				if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets {
					sp.droppedTargets = append(sp.droppedTargets, t)
				}
				sp.droppedTargetsCount++
			}
		}
	}
	sp.targetMtx.Unlock()
	sp.sync(all)

	sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
		time.Since(start).Seconds(),
	)
	sp.metrics.targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc()
}

// sync takes a list of potentially duplicated targets, deduplicates them, starts
// scrape loops for new targets, and stops scrape loops for disappeared targets.
// It returns after all stopped scrape loops terminated.
func (sp *scrapePool) sync(targets []*Target) {
	var (
		uniqueLoops   = make(map[uint64]loop)
		interval      = time.Duration(sp.config.ScrapeInterval)
		timeout       = time.Duration(sp.config.ScrapeTimeout)
		bodySizeLimit = int64(sp.config.BodySizeLimit)
		sampleLimit   = int(sp.config.SampleLimit)
		bucketLimit   = int(sp.config.NativeHistogramBucketLimit)
		labelLimits   = &labelLimits{
			labelLimit:            int(sp.config.LabelLimit),
			labelNameLengthLimit:  int(sp.config.LabelNameLengthLimit),
			labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
		}
		honorLabels              = sp.config.HonorLabels
		honorTimestamps          = sp.config.HonorTimestamps
		enableCompression        = sp.config.EnableCompression
		trackTimestampsStaleness = sp.config.TrackTimestampsStaleness
		mrc                      = sp.config.MetricRelabelConfigs
		scrapeClassicHistograms  = sp.config.ScrapeClassicHistograms
	)

	sp.targetMtx.Lock()
	for _, t := range targets {
		hash := t.hash()

		if _, ok := sp.activeTargets[hash]; !ok {
			// The scrape interval and timeout labels are set to the config's values initially,
			// so whether changed via relabeling or not, they'll exist and hold the correct values
			// for every target.
			var err error
			interval, timeout, err = t.intervalAndTimeout(interval, timeout)
			s := &targetScraper{
				Target:               t,
				client:               sp.client,
				timeout:              timeout,
				bodySizeLimit:        bodySizeLimit,
				acceptHeader:         acceptHeader(sp.config.ScrapeProtocols),
				acceptEncodingHeader: acceptEncodingHeader(enableCompression),
				metrics:              sp.metrics,
			}
			l := sp.newLoop(scrapeLoopOptions{
				target:                   t,
				scraper:                  s,
				sampleLimit:              sampleLimit,
				bucketLimit:              bucketLimit,
				labelLimits:              labelLimits,
				honorLabels:              honorLabels,
				honorTimestamps:          honorTimestamps,
				enableCompression:        enableCompression,
				trackTimestampsStaleness: trackTimestampsStaleness,
				mrc:                      mrc,
				interval:                 interval,
				timeout:                  timeout,
				scrapeClassicHistograms:  scrapeClassicHistograms,
			})
			if err != nil {
				l.setForcedError(err)
			}

			sp.activeTargets[hash] = t
			sp.loops[hash] = l

			uniqueLoops[hash] = l
		} else {
			// This might be a duplicated target.
			if _, ok := uniqueLoops[hash]; !ok {
				uniqueLoops[hash] = nil
			}
			// Need to keep the most updated labels information
			// for displaying it in the Service Discovery web page.
			sp.activeTargets[hash].SetDiscoveredLabels(t.DiscoveredLabels())
		}
	}

	var wg sync.WaitGroup

	// Stop and remove old targets and scraper loops.
	for hash := range sp.activeTargets {
		if _, ok := uniqueLoops[hash]; !ok {
			wg.Add(1)
			go func(l loop) {
				l.stop()
				wg.Done()
			}(sp.loops[hash])

			delete(sp.loops, hash)
			delete(sp.activeTargets, hash)
		}
	}

	sp.targetMtx.Unlock()

	sp.metrics.targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops)))
	forcedErr := sp.refreshTargetLimitErr()
	for _, l := range sp.loops {
		l.setForcedError(forcedErr)
	}
	for _, l := range uniqueLoops {
		if l != nil {
			go l.run(nil)
		}
	}
	// Wait for all potentially stopped scrapers to terminate.
	// This covers the case of flapping targets. If the server is under high load, a new scraper
	// may be active and tries to insert. The old scraper that didn't terminate yet could still
	// be inserting a previous sample set.
	wg.Wait()
}

// refreshTargetLimitErr returns an error that can be passed to the scrape loops
// if the number of targets exceeds the configured limit.
func (sp *scrapePool) refreshTargetLimitErr() error {
	if sp.config == nil || sp.config.TargetLimit == 0 {
		return nil
	}
	if l := len(sp.activeTargets); l > int(sp.config.TargetLimit) {
		sp.metrics.targetScrapePoolExceededTargetLimit.Inc()
		return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit)
	}
	return nil
}

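// verifyLabelLimits checks a label set against the configured label limits
// and returns an error describing the first limit that is exceeded.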
func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
	if limits == nil {
		return nil
	}

	met := lset.Get(labels.MetricName)
	if limits.labelLimit > 0 {
		nbLabels := lset.Len()
		if nbLabels > limits.labelLimit {
			return fmt.Errorf("label_limit exceeded (metric: %.50s, number of labels: %d, limit: %d)", met, nbLabels, limits.labelLimit)
		}
	}

	if limits.labelNameLengthLimit == 0 && limits.labelValueLengthLimit == 0 {
		return nil
	}

	return lset.Validate(func(l labels.Label) error {
		if limits.labelNameLengthLimit > 0 {
			nameLength := len(l.Name)
			if nameLength > limits.labelNameLengthLimit {
				return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label name: %.50s, length: %d, limit: %d)", met, l.Name, nameLength, limits.labelNameLengthLimit)
			}
		}

		if limits.labelValueLengthLimit > 0 {
			valueLength := len(l.Value)
			if valueLength > limits.labelValueLengthLimit {
				return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label name: %.50s, value: %.50q, length: %d, limit: %d)", met, l.Name, l.Value, valueLength, limits.labelValueLengthLimit)
			}
		}
		return nil
	})
}

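// mutateSampleLabels merges the target's labels into a scraped sample's label
// set. With honor set, labels already exposed by the target win; otherwise
// conflicting exposed labels are kept under the model.ExportedLabelPrefix
// prefix. Metric relabel configs, if any, are applied last.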
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
	lb := labels.NewBuilder(lset)

	if honor {
		target.LabelsRange(func(l labels.Label) {
			if !lset.Has(l.Name) {
				lb.Set(l.Name, l.Value)
			}
		})
	} else {
		var conflictingExposedLabels []labels.Label
		target.LabelsRange(func(l labels.Label) {
			existingValue := lset.Get(l.Name)
			if existingValue != "" {
				conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue})
			}
			// It is now safe to set the target label.
			lb.Set(l.Name, l.Value)
		})

		if len(conflictingExposedLabels) > 0 {
			resolveConflictingExposedLabels(lb, conflictingExposedLabels)
		}
	}

	res := lb.Labels()

	if len(rc) > 0 {
		res, _ = relabel.Process(res, rc...)
	}

	return res
}

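// resolveConflictingExposedLabels re-adds exposed labels that collided with
// target labels, prepending model.ExportedLabelPrefix until an unused label
// name is found.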
func resolveConflictingExposedLabels(lb *labels.Builder, conflictingExposedLabels []labels.Label) {
	slices.SortStableFunc(conflictingExposedLabels, func(a, b labels.Label) int {
		return len(a.Name) - len(b.Name)
	})

	for _, l := range conflictingExposedLabels {
		newName := l.Name
		for {
			newName = model.ExportedLabelPrefix + newName
			if lb.Get(newName) == "" {
				lb.Set(newName, l.Value)
				break
			}
		}
	}
}

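// mutateReportSampleLabels applies the target's labels to a report sample,
// keeping any exposed value for the same name under the
// model.ExportedLabelPrefix prefix.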
func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels {
	lb := labels.NewBuilder(lset)

	target.LabelsRange(func(l labels.Label) {
		lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name))
		lb.Set(l.Name, l.Value)
	})

	return lb.Labels()
}

// appender returns an appender for ingested samples from the target.
func appender(app storage.Appender, sampleLimit, bucketLimit int, maxSchema int32) storage.Appender {
	app = &timeLimitAppender{
		Appender: app,
		maxTime:  timestamp.FromTime(time.Now().Add(maxAheadTime)),
	}

	// The sampleLimit is applied after metrics are potentially dropped via relabeling.
	if sampleLimit > 0 {
		app = &limitAppender{
			Appender: app,
			limit:    sampleLimit,
		}
	}

	if bucketLimit > 0 {
		app = &bucketLimitAppender{
			Appender: app,
			limit:    bucketLimit,
		}
	}

	if maxSchema < nativeHistogramMaxSchema {
		app = &maxSchemaAppender{
			Appender:  app,
			maxSchema: maxSchema,
		}
	}

	return app
}

// A scraper retrieves samples and accepts a status report at the end.
type scraper interface {
	scrape(ctx context.Context) (*http.Response, error)
	readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error)
	Report(start time.Time, dur time.Duration, err error)
	offset(interval time.Duration, offsetSeed uint64) time.Duration
}

// targetScraper implements the scraper interface for a target.
type targetScraper struct {
	*Target

	client  *http.Client
	req     *http.Request
	timeout time.Duration

	gzipr *gzip.Reader
	buf   *bufio.Reader

	bodySizeLimit        int64
	acceptHeader         string
	acceptEncodingHeader string

	metrics *scrapeMetrics
}

var errBodySizeLimit = errors.New("body size limit exceeded")

// acceptHeader transforms preference from the options into specific header values as
// https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines.
// No validation is done here; we expect the scrape protocols to have been validated already.
func acceptHeader(sps []config.ScrapeProtocol) string {
	var vals []string
	weight := len(config.ScrapeProtocolsHeaders) + 1
	for _, sp := range sps {
		vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight))
		weight--
	}
	// Default match anything.
	vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight))
	return strings.Join(vals, ",")
}

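// acceptEncodingHeader returns the Accept-Encoding value for scrape requests:
// gzip when response compression is enabled, identity otherwise.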
func acceptEncodingHeader(enableCompression bool) string {
	if enableCompression {
		return "gzip"
	}
	return "identity"
}

var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)

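// scrape performs the HTTP request against the target, lazily building and
// then reusing the request with the configured Accept, Accept-Encoding,
// User-Agent and scrape-timeout headers.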
func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
	if s.req == nil {
		req, err := http.NewRequest("GET", s.URL().String(), nil)
		if err != nil {
			return nil, err
		}
		req.Header.Add("Accept", s.acceptHeader)
		req.Header.Add("Accept-Encoding", s.acceptEncodingHeader)
		req.Header.Set("User-Agent", UserAgent)
		req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64))

		s.req = req
	}

	return s.client.Do(s.req.WithContext(ctx))
}

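// readResponse copies the (possibly gzip-compressed) response body into w
// while enforcing the configured body size limit, and returns the response's
// Content-Type.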
func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w io.Writer) (string, error) {
	defer func() {
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}()

	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	if s.bodySizeLimit <= 0 {
		s.bodySizeLimit = math.MaxInt64
	}
	if resp.Header.Get("Content-Encoding") != "gzip" {
		n, err := io.Copy(w, io.LimitReader(resp.Body, s.bodySizeLimit))
		if err != nil {
			return "", err
		}
		if n >= s.bodySizeLimit {
			s.metrics.targetScrapeExceededBodySizeLimit.Inc()
			return "", errBodySizeLimit
		}
		return resp.Header.Get("Content-Type"), nil
	}

	if s.gzipr == nil {
		s.buf = bufio.NewReader(resp.Body)
		var err error
		s.gzipr, err = gzip.NewReader(s.buf)
		if err != nil {
			return "", err
		}
	} else {
		s.buf.Reset(resp.Body)
		if err := s.gzipr.Reset(s.buf); err != nil {
			return "", err
		}
	}

	n, err := io.Copy(w, io.LimitReader(s.gzipr, s.bodySizeLimit))
	s.gzipr.Close()
	if err != nil {
		return "", err
	}
	if n >= s.bodySizeLimit {
		s.metrics.targetScrapeExceededBodySizeLimit.Inc()
		return "", errBodySizeLimit
	}
	return resp.Header.Get("Content-Type"), nil
}

// A loop can run and be stopped again. It must not be reused after it was stopped.
type loop interface {
	run(errc chan<- error)
	setForcedError(err error)
	stop()
	getCache() *scrapeCache
	disableEndOfRunStalenessMarkers()
}

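// cacheEntry maps an exposed metric string to its storage reference, hash,
// parsed label set, and the scrape iteration it was last seen in.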
type cacheEntry struct {
	ref      storage.SeriesRef
	lastIter uint64
	hash     uint64
	lset     labels.Labels
}

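// scrapeLoop manages scraping a single target and appending the scraped
// samples to storage.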
type scrapeLoop struct {
	scraper                  scraper
	l                        log.Logger
	cache                    *scrapeCache
	lastScrapeSize           int
	buffers                  *pool.Pool
	offsetSeed               uint64
	honorTimestamps          bool
	trackTimestampsStaleness bool
	enableCompression        bool
	forcedErr                error
	forcedErrMtx             sync.Mutex
	sampleLimit              int
	bucketLimit              int
	maxSchema                int32
	labelLimits              *labelLimits
	interval                 time.Duration
	timeout                  time.Duration
	scrapeClassicHistograms  bool
	enableCTZeroIngestion    bool

	appender            func(ctx context.Context) storage.Appender
	sampleMutator       labelsMutator
	reportSampleMutator labelsMutator

	parentCtx   context.Context
	appenderCtx context.Context
	ctx         context.Context
	cancel      func()
	stopped     chan struct{}

	disabledEndOfRunStalenessMarkers bool

	reportExtraMetrics  bool
	appendMetadataToWAL bool

	metrics *scrapeMetrics

	skipOffsetting bool // For testability.
}

// scrapeCache tracks mappings of exposed metric strings to label sets and
// storage references. Additionally, it tracks staleness of series between
// scrapes.
type scrapeCache struct {
	iter uint64 // Current scrape iteration.

	// How many series and metadata entries there were at the last success.
	successfulCount int

	// Parsed string to an entry with information about the actual label set
	// and its storage reference.
	series map[string]*cacheEntry

	// Cache of dropped metric strings and their iteration. The iteration must
	// be a pointer so we can update it.
	droppedSeries map[string]*uint64

	// seriesCur and seriesPrev store the labels of series that were seen
	// in the current and previous scrape.
	// We hold two maps and swap them out to save allocations.
	seriesCur  map[uint64]labels.Labels
	seriesPrev map[uint64]labels.Labels

	metaMtx  sync.Mutex
	metadata map[string]*metaEntry

	metrics *scrapeMetrics
}

// metaEntry holds meta information about a metric.
type metaEntry struct {
	metadata.Metadata

	lastIter       uint64 // Last scrape iteration the entry was observed at.
	lastIterChange uint64 // Last scrape iteration the entry was changed at.
}

func (m *metaEntry) size() int {
	// The attribute lastIter, although part of the struct, is not metadata.
	return len(m.Help) + len(m.Unit) + len(m.Type)
}

func newScrapeCache(metrics *scrapeMetrics) *scrapeCache {
	return &scrapeCache{
		series:        map[string]*cacheEntry{},
		droppedSeries: map[string]*uint64{},
		seriesCur:     map[uint64]labels.Labels{},
		seriesPrev:    map[uint64]labels.Labels{},
		metadata:      map[string]*metaEntry{},
		metrics:       metrics,
	}
}

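// iterDone finishes a scrape iteration: when flushCache is set (or the cache
// has grown suspiciously since the last successful scrape), entries not seen
// in the current iteration are dropped, and the current and previous series
// maps used for staleness tracking are swapped.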
func (c *scrapeCache) iterDone(flushCache bool) {
	c.metaMtx.Lock()
	count := len(c.series) + len(c.droppedSeries) + len(c.metadata)
	c.metaMtx.Unlock()

	switch {
	case flushCache:
		c.successfulCount = count
	case count > c.successfulCount*2+1000:
		// If a target had varying labels in scrapes that ultimately failed,
		// the caches would grow indefinitely. Force a flush when this happens.
		// We use the heuristic that this is a doubling of the cache size
		// since the last scrape, and allow an additional 1000 in case
		// initial scrapes all fail.
		flushCache = true
		c.metrics.targetScrapeCacheFlushForced.Inc()
	}

	if flushCache {
		// All caches may grow over time through series churn
		// or multiple string representations of the same metric. Clean up entries
		// that haven't appeared in the last scrape.
		for s, e := range c.series {
			if c.iter != e.lastIter {
				delete(c.series, s)
			}
		}
		for s, iter := range c.droppedSeries {
			if c.iter != *iter {
				delete(c.droppedSeries, s)
			}
		}
		c.metaMtx.Lock()
		for m, e := range c.metadata {
			// Keep metadata around for 10 scrapes after its metric disappeared.
			if c.iter-e.lastIter > 10 {
				delete(c.metadata, m)
			}
		}
		c.metaMtx.Unlock()

		c.iter++
	}

	// Swap current and previous series.
	c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev

	// We have to delete every single key in the map.
	for k := range c.seriesCur {
		delete(c.seriesCur, k)
	}
}

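// get returns the cached entry for the given exposed metric string, marking
// it as seen in the current iteration.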
func (c *scrapeCache) get(met []byte) (*cacheEntry, bool) {
	e, ok := c.series[string(met)]
	if !ok {
		return nil, false
	}
	e.lastIter = c.iter
	return e, true
}

func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
|
2017-09-07 12:14:41 +00:00
|
|
|
if ref == 0 {
|
2017-06-26 06:56:40 +00:00
|
|
|
return
|
|
|
|
}
|
2022-12-20 16:54:07 +00:00
|
|
|
c.series[string(met)] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash}
|
2017-05-26 08:44:48 +00:00
|
|
|
}
|
|
|
|
|
2023-01-04 12:05:42 +00:00
|
|
|
func (c *scrapeCache) addDropped(met []byte) {
|
2017-09-08 12:34:45 +00:00
|
|
|
iter := c.iter
|
2023-01-04 12:05:42 +00:00
|
|
|
c.droppedSeries[string(met)] = &iter
|
2017-09-08 12:34:45 +00:00
|
|
|
}
|
|
|
|
|
2022-12-20 16:54:07 +00:00
|
|
|
func (c *scrapeCache) getDropped(met []byte) bool {
|
|
|
|
iterp, ok := c.droppedSeries[string(met)]
|
2017-09-08 12:34:45 +00:00
|
|
|
if ok {
|
|
|
|
*iterp = c.iter
|
|
|
|
}
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2017-06-26 06:56:40 +00:00
|
|
|
func (c *scrapeCache) trackStaleness(hash uint64, lset labels.Labels) {
|
|
|
|
c.seriesCur[hash] = lset
|
2017-05-26 08:44:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
|
|
|
|
for h, lset := range c.seriesPrev {
|
|
|
|
if _, ok := c.seriesCur[h]; !ok {
|
|
|
|
if !f(lset) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-12 12:14:36 +00:00
|
|
|
func (c *scrapeCache) setType(metric []byte, t model.MetricType) {
|
2018-05-18 07:32:11 +00:00
|
|
|
c.metaMtx.Lock()
|
|
|
|
|
2022-12-20 16:54:07 +00:00
|
|
|
e, ok := c.metadata[string(metric)]
|
2018-05-18 07:32:11 +00:00
|
|
|
if !ok {
|
2023-11-22 14:39:21 +00:00
|
|
|
e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
|
2018-05-18 07:32:11 +00:00
|
|
|
c.metadata[string(metric)] = e
|
|
|
|
}
|
2022-08-31 13:50:05 +00:00
|
|
|
if e.Type != t {
|
|
|
|
e.Type = t
|
|
|
|
e.lastIterChange = c.iter
|
|
|
|
}
|
2018-05-18 07:32:11 +00:00
|
|
|
e.lastIter = c.iter
|
|
|
|
|
|
|
|
c.metaMtx.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *scrapeCache) setHelp(metric, help []byte) {
|
|
|
|
c.metaMtx.Lock()
|
|
|
|
|
2022-12-20 16:54:07 +00:00
|
|
|
e, ok := c.metadata[string(metric)]
|
2018-05-18 07:32:11 +00:00
|
|
|
if !ok {
|
2023-11-22 14:39:21 +00:00
|
|
|
e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
|
2018-05-18 07:32:11 +00:00
|
|
|
c.metadata[string(metric)] = e
|
|
|
|
}
|
2022-12-20 16:54:07 +00:00
|
|
|
if e.Help != string(help) {
|
2022-08-31 13:50:05 +00:00
|
|
|
e.Help = string(help)
|
|
|
|
e.lastIterChange = c.iter
|
2018-05-18 07:32:11 +00:00
|
|
|
}
|
|
|
|
e.lastIter = c.iter
|
|
|
|
|
|
|
|
c.metaMtx.Unlock()
|
|
|
|
}
|
|
|
|
|
2018-10-05 16:11:16 +00:00
|
|
|
func (c *scrapeCache) setUnit(metric, unit []byte) {
|
|
|
|
c.metaMtx.Lock()
|
|
|
|
|
2022-12-20 16:54:07 +00:00
|
|
|
e, ok := c.metadata[string(metric)]
|
2018-10-05 16:11:16 +00:00
|
|
|
if !ok {
|
2023-11-22 14:39:21 +00:00
|
|
|
e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
|
2018-10-05 16:11:16 +00:00
|
|
|
c.metadata[string(metric)] = e
|
|
|
|
}
|
2022-12-20 16:54:07 +00:00
|
|
|
if e.Unit != string(unit) {
|
2022-08-31 13:50:05 +00:00
|
|
|
e.Unit = string(unit)
|
|
|
|
e.lastIterChange = c.iter
|
2018-10-05 16:11:16 +00:00
|
|
|
}
|
|
|
|
e.lastIter = c.iter
|
|
|
|
|
|
|
|
c.metaMtx.Unlock()
|
|
|
|
}
|
|
|
|
|
2019-12-04 15:18:27 +00:00
|
|
|
func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) {
|
2018-05-18 07:32:11 +00:00
|
|
|
c.metaMtx.Lock()
|
|
|
|
defer c.metaMtx.Unlock()
|
|
|
|
|
|
|
|
m, ok := c.metadata[metric]
|
|
|
|
if !ok {
|
|
|
|
return MetricMetadata{}, false
|
|
|
|
}
|
|
|
|
return MetricMetadata{
|
|
|
|
Metric: metric,
|
2022-08-31 13:50:05 +00:00
|
|
|
Type: m.Type,
|
|
|
|
Help: m.Help,
|
|
|
|
Unit: m.Unit,
|
2018-05-18 07:32:11 +00:00
|
|
|
}, true
|
|
|
|
}
|
|
|
|
|
2019-12-04 15:18:27 +00:00
|
|
|
func (c *scrapeCache) ListMetadata() []MetricMetadata {
|
2018-05-18 07:32:11 +00:00
|
|
|
c.metaMtx.Lock()
|
|
|
|
defer c.metaMtx.Unlock()
|
|
|
|
|
2018-06-05 10:30:19 +00:00
|
|
|
res := make([]MetricMetadata, 0, len(c.metadata))
|
|
|
|
|
2018-05-18 07:32:11 +00:00
|
|
|
for m, e := range c.metadata {
|
|
|
|
res = append(res, MetricMetadata{
|
|
|
|
Metric: m,
|
2022-08-31 13:50:05 +00:00
|
|
|
Type: e.Type,
|
|
|
|
Help: e.Help,
|
|
|
|
Unit: e.Unit,
|
2018-05-18 07:32:11 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
2020-01-29 11:13:18 +00:00
|
|
|
// SizeMetadata returns the size of the metadata cache.
|
|
|
|
func (c *scrapeCache) SizeMetadata() (s int) {
|
|
|
|
c.metaMtx.Lock()
|
|
|
|
defer c.metaMtx.Unlock()
|
|
|
|
for _, e := range c.metadata {
|
|
|
|
s += e.size()
|
|
|
|
}
|
|
|
|
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
|
|
|
// LengthMetadata returns the number of metadata entries in the cache.
|
|
|
|
func (c *scrapeCache) LengthMetadata() int {
|
|
|
|
c.metaMtx.Lock()
|
|
|
|
defer c.metaMtx.Unlock()
|
|
|
|
|
|
|
|
return len(c.metadata)
|
|
|
|
}
|
|
|
|
|
2017-11-26 15:15:15 +00:00
|
|
|
func newScrapeLoop(ctx context.Context,
|
2017-05-26 08:44:48 +00:00
|
|
|
sc scraper,
|
|
|
|
l log.Logger,
|
2018-02-13 20:44:51 +00:00
|
|
|
buffers *pool.Pool,
|
2017-09-08 12:34:45 +00:00
|
|
|
sampleMutator labelsMutator,
|
|
|
|
reportSampleMutator labelsMutator,
|
2020-07-24 14:10:51 +00:00
|
|
|
appender func(ctx context.Context) storage.Appender,
|
2018-05-18 07:32:11 +00:00
|
|
|
cache *scrapeCache,
|
2023-05-25 09:49:43 +00:00
|
|
|
offsetSeed uint64,
|
2019-03-15 10:04:15 +00:00
|
|
|
honorTimestamps bool,
|
2023-10-31 20:58:42 +00:00
|
|
|
trackTimestampsStaleness bool,
|
2023-11-20 12:02:53 +00:00
|
|
|
enableCompression bool,
|
2021-09-03 13:37:42 +00:00
|
|
|
sampleLimit int,
|
2023-04-21 19:14:19 +00:00
|
|
|
bucketLimit int,
|
2024-01-17 15:58:54 +00:00
|
|
|
maxSchema int32,
|
2021-05-06 08:56:21 +00:00
|
|
|
labelLimits *labelLimits,
|
2021-08-31 15:37:32 +00:00
|
|
|
interval time.Duration,
|
|
|
|
timeout time.Duration,
|
2023-05-10 23:59:21 +00:00
|
|
|
scrapeClassicHistograms bool,
|
2023-12-11 08:43:42 +00:00
|
|
|
enableCTZeroIngestion bool,
|
2021-10-24 21:45:31 +00:00
|
|
|
reportExtraMetrics bool,
|
2022-08-31 13:50:05 +00:00
|
|
|
appendMetadataToWAL bool,
|
2022-05-03 18:45:52 +00:00
|
|
|
target *Target,
|
|
|
|
passMetadataInContext bool,
|
2023-09-22 16:47:44 +00:00
|
|
|
metrics *scrapeMetrics,
|
2023-12-11 08:43:42 +00:00
|
|
|
skipOffsetting bool,
|
2017-05-26 08:44:48 +00:00
|
|
|
) *scrapeLoop {
|
2017-05-16 13:04:37 +00:00
|
|
|
if l == nil {
|
2017-08-11 18:45:52 +00:00
|
|
|
l = log.NewNopLogger()
|
2017-05-16 13:04:37 +00:00
|
|
|
}
|
2017-09-07 12:43:21 +00:00
|
|
|
if buffers == nil {
|
2018-02-13 20:44:51 +00:00
|
|
|
buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
|
2017-09-07 12:43:21 +00:00
|
|
|
}
|
2018-05-18 07:32:11 +00:00
|
|
|
if cache == nil {
|
2023-09-22 16:47:44 +00:00
|
|
|
cache = newScrapeCache(metrics)
|
2018-05-18 07:32:11 +00:00
|
|
|
}
|
2022-05-03 18:45:52 +00:00
|
|
|
|
|
|
|
appenderCtx := ctx
|
|
|
|
|
|
|
|
if passMetadataInContext {
|
|
|
|
// Store the cache and target in the context. This is then used by downstream OTel Collector
|
|
|
|
// to look up the metadata required to process the samples. Not used by Prometheus itself.
|
|
|
|
// TODO(gouthamve) We're using a dedicated context because using the parentCtx caused a memory
|
|
|
|
// leak. We should ideally fix the main leak. See: https://github.com/prometheus/prometheus/pull/10590
|
|
|
|
appenderCtx = ContextWithMetricMetadataStore(appenderCtx, cache)
|
|
|
|
appenderCtx = ContextWithTarget(appenderCtx, target)
|
|
|
|
}
|
|
|
|
|
2016-02-22 15:46:55 +00:00
|
|
|
sl := &scrapeLoop{
|
2023-10-31 20:58:42 +00:00
|
|
|
scraper: sc,
|
|
|
|
buffers: buffers,
|
|
|
|
cache: cache,
|
|
|
|
appender: appender,
|
|
|
|
sampleMutator: sampleMutator,
|
|
|
|
reportSampleMutator: reportSampleMutator,
|
|
|
|
stopped: make(chan struct{}),
|
|
|
|
offsetSeed: offsetSeed,
|
|
|
|
l: l,
|
|
|
|
parentCtx: ctx,
|
|
|
|
appenderCtx: appenderCtx,
|
|
|
|
honorTimestamps: honorTimestamps,
|
|
|
|
trackTimestampsStaleness: trackTimestampsStaleness,
|
2023-11-20 12:02:53 +00:00
|
|
|
enableCompression: enableCompression,
|
2023-10-31 20:58:42 +00:00
|
|
|
sampleLimit: sampleLimit,
|
|
|
|
bucketLimit: bucketLimit,
|
2024-01-17 15:58:54 +00:00
|
|
|
maxSchema: maxSchema,
|
2023-10-31 20:58:42 +00:00
|
|
|
labelLimits: labelLimits,
|
|
|
|
interval: interval,
|
|
|
|
timeout: timeout,
|
|
|
|
scrapeClassicHistograms: scrapeClassicHistograms,
|
2023-12-11 08:43:42 +00:00
|
|
|
enableCTZeroIngestion: enableCTZeroIngestion,
|
2023-10-31 20:58:42 +00:00
|
|
|
reportExtraMetrics: reportExtraMetrics,
|
|
|
|
appendMetadataToWAL: appendMetadataToWAL,
|
|
|
|
metrics: metrics,
|
2023-12-11 08:43:42 +00:00
|
|
|
skipOffsetting: skipOffsetting,
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
2019-08-28 13:55:09 +00:00
|
|
|
sl.ctx, sl.cancel = context.WithCancel(ctx)
|
2016-02-22 15:46:55 +00:00
|
|
|
|
|
|
|
return sl
|
|
|
|
}
|
|
|
|
|
2021-08-31 15:37:32 +00:00
|
|
|
func (sl *scrapeLoop) run(errc chan<- error) {
|
2023-12-11 08:43:42 +00:00
|
|
|
if !sl.skipOffsetting {
|
|
|
|
select {
|
|
|
|
case <-time.After(sl.scraper.offset(sl.interval, sl.offsetSeed)):
|
|
|
|
// Continue after a scraping offset.
|
|
|
|
case <-sl.ctx.Done():
|
|
|
|
close(sl.stopped)
|
|
|
|
return
|
|
|
|
}
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var last time.Time
|
|
|
|
|
2021-03-15 13:05:17 +00:00
|
|
|
alignedScrapeTime := time.Now().Round(0)
|
2021-08-31 15:37:32 +00:00
|
|
|
ticker := time.NewTicker(sl.interval)
|
2016-02-22 15:46:55 +00:00
|
|
|
defer ticker.Stop()
|
|
|
|
|
2017-05-10 15:59:02 +00:00
|
|
|
mainLoop:
|
2016-02-22 15:46:55 +00:00
|
|
|
for {
|
|
|
|
select {
|
2019-08-28 13:55:09 +00:00
|
|
|
case <-sl.parentCtx.Done():
|
2017-05-10 15:59:02 +00:00
|
|
|
close(sl.stopped)
|
2016-02-22 15:46:55 +00:00
|
|
|
return
|
2019-08-28 13:55:09 +00:00
|
|
|
case <-sl.ctx.Done():
|
2017-05-10 15:59:02 +00:00
|
|
|
break mainLoop
|
2016-02-22 15:46:55 +00:00
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
2020-10-05 16:17:50 +00:00
|
|
|
// Temporary workaround for a jitter in go timers that causes disk space
|
|
|
|
// increase in TSDB.
|
|
|
|
// See https://github.com/prometheus/prometheus/issues/7846
|
2021-03-15 13:05:17 +00:00
|
|
|
// Calling Round ensures the time used is the wall clock, as otherwise .Sub
|
|
|
|
// and .Add on time.Time behave differently (see time package docs).
|
|
|
|
scrapeTime := time.Now().Round(0)
|
2021-09-08 11:57:33 +00:00
|
|
|
if AlignScrapeTimestamps && sl.interval > 100*ScrapeTimestampTolerance {
|
2020-10-06 11:48:24 +00:00
|
|
|
// For some reason, a tick might have been skipped, in which case we
|
2020-10-05 16:17:50 +00:00
|
|
|
// would call alignedScrapeTime.Add(interval) multiple times.
|
2021-08-31 15:37:32 +00:00
|
|
|
for scrapeTime.Sub(alignedScrapeTime) >= sl.interval {
|
|
|
|
alignedScrapeTime = alignedScrapeTime.Add(sl.interval)
|
2020-10-05 16:17:50 +00:00
|
|
|
}
|
|
|
|
// Align the scrape time if we are in the tolerance boundaries.
|
2021-09-08 11:57:33 +00:00
|
|
|
if scrapeTime.Sub(alignedScrapeTime) <= ScrapeTimestampTolerance {
|
2020-10-05 16:17:50 +00:00
|
|
|
scrapeTime = alignedScrapeTime
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-24 14:38:21 +00:00
|
|
|
last = sl.scrapeAndReport(last, scrapeTime, errc)
|
2016-02-22 17:49:26 +00:00
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
select {
|
|
|
|
case <-sl.parentCtx.Done():
|
|
|
|
close(sl.stopped)
|
|
|
|
return
|
|
|
|
case <-sl.ctx.Done():
|
|
|
|
break mainLoop
|
|
|
|
case <-ticker.C:
|
2016-12-29 08:27:30 +00:00
|
|
|
}
|
2020-07-16 11:53:39 +00:00
|
|
|
}
|
2018-02-20 11:32:23 +00:00
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
close(sl.stopped)
|
2016-02-22 15:46:55 +00:00
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
if !sl.disabledEndOfRunStalenessMarkers {
|
2021-08-31 15:37:32 +00:00
|
|
|
sl.endOfRunStaleness(last, ticker, sl.interval)
|
2020-07-16 11:53:39 +00:00
|
|
|
}
|
|
|
|
}
|
2017-09-07 12:43:21 +00:00
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
// scrapeAndReport performs a scrape and then appends the result to the storage
|
|
|
|
// together with reporting metrics, by using as few appenders as possible.
|
|
|
|
// In the happy scenario, a single appender is used.
|
2022-05-03 18:45:52 +00:00
|
|
|
// This function uses sl.appenderCtx instead of sl.ctx on purpose. A scrape should
|
2020-08-07 13:58:16 +00:00
|
|
|
// only be cancelled on shutdown, not on reloads.
|
2021-10-24 14:38:21 +00:00
|
|
|
func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time {
|
2020-07-30 12:20:24 +00:00
|
|
|
start := time.Now()
|
2017-06-15 02:08:03 +00:00
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
// Only record after the first scrape.
|
|
|
|
if !last.IsZero() {
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetIntervalLength.WithLabelValues(sl.interval.String()).Observe(
|
2020-07-16 11:53:39 +00:00
|
|
|
time.Since(last).Seconds(),
|
|
|
|
)
|
|
|
|
}
|
2016-02-22 15:46:55 +00:00
|
|
|
|
2023-10-09 16:23:53 +00:00
|
|
|
var total, added, seriesAdded, bytesRead int
|
2020-07-30 12:20:24 +00:00
|
|
|
var err, appErr, scrapeErr error
|
2020-07-31 17:11:08 +00:00
|
|
|
|
2022-05-03 18:45:52 +00:00
|
|
|
app := sl.appender(sl.appenderCtx)
|
2020-07-16 11:53:39 +00:00
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
app.Rollback()
|
2016-02-22 15:46:55 +00:00
|
|
|
return
|
2020-07-16 11:53:39 +00:00
|
|
|
}
|
|
|
|
err = app.Commit()
|
|
|
|
if err != nil {
|
|
|
|
level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err)
|
|
|
|
}
|
|
|
|
}()
|
2020-07-31 17:11:08 +00:00
|
|
|
|
|
|
|
defer func() {
|
2023-10-09 16:23:53 +00:00
|
|
|
if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil {
|
2020-07-31 17:11:08 +00:00
|
|
|
level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2020-07-30 12:20:24 +00:00
|
|
|
if forcedErr := sl.getForcedError(); forcedErr != nil {
|
2020-07-31 17:11:08 +00:00
|
|
|
scrapeErr = forcedErr
|
2020-07-30 12:20:24 +00:00
|
|
|
// Add stale markers.
|
2020-10-05 16:17:50 +00:00
|
|
|
if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
|
2020-07-16 11:53:39 +00:00
|
|
|
app.Rollback()
|
2022-05-03 18:45:52 +00:00
|
|
|
app = sl.appender(sl.appenderCtx)
|
2020-07-16 11:53:39 +00:00
|
|
|
level.Warn(sl.l).Log("msg", "Append failed", "err", err)
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
2020-07-30 12:20:24 +00:00
|
|
|
if errc != nil {
|
|
|
|
errc <- forcedErr
|
|
|
|
}
|
2020-07-31 17:11:08 +00:00
|
|
|
|
|
|
|
return start
|
|
|
|
}
|
|
|
|
|
|
|
|
var contentType string
|
2023-10-09 16:23:53 +00:00
|
|
|
var resp *http.Response
|
|
|
|
var b []byte
|
|
|
|
var buf *bytes.Buffer
|
2021-10-24 14:38:21 +00:00
|
|
|
scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout)
|
2023-10-09 16:23:53 +00:00
|
|
|
resp, scrapeErr = sl.scraper.scrape(scrapeCtx)
|
|
|
|
if scrapeErr == nil {
|
|
|
|
b = sl.buffers.Get(sl.lastScrapeSize).([]byte)
|
|
|
|
defer sl.buffers.Put(b)
|
|
|
|
buf = bytes.NewBuffer(b)
|
|
|
|
contentType, scrapeErr = sl.scraper.readResponse(scrapeCtx, resp, buf)
|
|
|
|
}
|
2020-07-31 17:11:08 +00:00
|
|
|
cancel()
|
|
|
|
|
|
|
|
if scrapeErr == nil {
|
|
|
|
b = buf.Bytes()
|
|
|
|
// NOTE: There were issues with misbehaving clients in the past
|
|
|
|
// that occasionally returned empty results. We don't want those
|
|
|
|
// to falsely reset our buffer size.
|
|
|
|
if len(b) > 0 {
|
|
|
|
sl.lastScrapeSize = len(b)
|
|
|
|
}
|
2023-10-09 16:23:53 +00:00
|
|
|
bytesRead = len(b)
|
2020-07-30 12:20:24 +00:00
|
|
|
} else {
|
2020-08-01 08:56:21 +00:00
|
|
|
level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr)
|
2020-07-31 17:11:08 +00:00
|
|
|
if errc != nil {
|
|
|
|
errc <- scrapeErr
|
2020-07-30 12:20:24 +00:00
|
|
|
}
|
2021-10-24 21:45:31 +00:00
|
|
|
if errors.Is(scrapeErr, errBodySizeLimit) {
|
2023-10-09 16:23:53 +00:00
|
|
|
bytesRead = -1
|
2021-10-24 21:45:31 +00:00
|
|
|
}
|
2020-07-31 17:11:08 +00:00
|
|
|
}
|
2020-07-30 12:20:24 +00:00
|
|
|
|
2020-07-31 17:11:08 +00:00
|
|
|
// A failed scrape is the same as an empty scrape,
|
|
|
|
// we still call sl.append to trigger stale markers.
|
2020-10-05 16:17:50 +00:00
|
|
|
total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime)
|
2020-07-31 17:11:08 +00:00
|
|
|
if appErr != nil {
|
|
|
|
app.Rollback()
|
2022-05-03 18:45:52 +00:00
|
|
|
app = sl.appender(sl.appenderCtx)
|
2020-07-31 17:11:08 +00:00
|
|
|
level.Debug(sl.l).Log("msg", "Append failed", "err", appErr)
|
|
|
|
// The append failed, probably due to a parse error or sample limit.
|
|
|
|
// Call sl.append again with an empty scrape to trigger stale markers.
|
2020-10-05 16:17:50 +00:00
|
|
|
if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil {
|
2020-07-30 12:20:24 +00:00
|
|
|
app.Rollback()
|
2022-05-03 18:45:52 +00:00
|
|
|
app = sl.appender(sl.appenderCtx)
|
2020-07-31 17:11:08 +00:00
|
|
|
level.Warn(sl.l).Log("msg", "Append failed", "err", err)
|
2020-07-30 12:20:24 +00:00
|
|
|
}
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
2017-05-10 15:59:02 +00:00
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
if scrapeErr == nil {
|
|
|
|
scrapeErr = appErr
|
|
|
|
}
|
|
|
|
|
|
|
|
return start
|
2017-05-16 12:12:21 +00:00
|
|
|
}
|
|
|
|
|
2020-07-30 12:20:24 +00:00
|
|
|
func (sl *scrapeLoop) setForcedError(err error) {
|
|
|
|
sl.forcedErrMtx.Lock()
|
|
|
|
defer sl.forcedErrMtx.Unlock()
|
|
|
|
sl.forcedErr = err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (sl *scrapeLoop) getForcedError() error {
|
|
|
|
sl.forcedErrMtx.Lock()
|
|
|
|
defer sl.forcedErrMtx.Unlock()
|
|
|
|
return sl.forcedErr
|
|
|
|
}
|
|
|
|
|
2017-05-16 12:12:21 +00:00
|
|
|
func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) {
|
2017-05-10 15:59:02 +00:00
|
|
|
// Scraping has stopped. We want to write stale markers but
|
|
|
|
// the target may be recreated, so we wait just over 2 scrape intervals
|
|
|
|
// before creating them.
|
2018-11-27 16:44:29 +00:00
|
|
|
// If the context is canceled, we presume the server is shutting down
|
2017-05-10 15:59:02 +00:00
|
|
|
// and will restart where it was. We do not attempt to write stale markers
|
|
|
|
// in this case.
|
|
|
|
|
|
|
|
if last.IsZero() {
|
|
|
|
// There never was a scrape, so there will be no stale markers.
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for when the next scrape would have been, and record its timestamp.
|
|
|
|
var staleTime time.Time
|
|
|
|
select {
|
2019-08-28 13:55:09 +00:00
|
|
|
case <-sl.parentCtx.Done():
|
2017-05-10 15:59:02 +00:00
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
staleTime = time.Now()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for when the next scrape would have been; if the target was recreated,
|
|
|
|
// samples should have been ingested by now.
|
|
|
|
select {
|
2019-08-28 13:55:09 +00:00
|
|
|
case <-sl.parentCtx.Done():
|
2017-05-10 15:59:02 +00:00
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for an extra 10% of the interval, just to be safe.
|
|
|
|
select {
|
2019-08-28 13:55:09 +00:00
|
|
|
case <-sl.parentCtx.Done():
|
2017-05-10 15:59:02 +00:00
|
|
|
return
|
|
|
|
case <-time.After(interval / 10):
|
|
|
|
}
|
|
|
|
|
|
|
|
// Call sl.append again with an empty scrape to trigger stale markers.
|
|
|
|
// If the target has since been recreated and scraped, the
|
|
|
|
// stale markers will be out of order and ignored.
|
2022-05-03 18:45:52 +00:00
|
|
|
// sl.context would have been cancelled, hence using sl.appenderCtx.
|
|
|
|
app := sl.appender(sl.appenderCtx)
|
2020-07-16 11:53:39 +00:00
|
|
|
var err error
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
app.Rollback()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
err = app.Commit()
|
|
|
|
if err != nil {
|
|
|
|
level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil {
|
|
|
|
app.Rollback()
|
2022-05-03 18:45:52 +00:00
|
|
|
app = sl.appender(sl.appenderCtx)
|
2020-07-16 11:53:39 +00:00
|
|
|
level.Warn(sl.l).Log("msg", "Stale append failed", "err", err)
|
2017-05-10 15:59:02 +00:00
|
|
|
}
|
2020-07-16 11:53:39 +00:00
|
|
|
if err = sl.reportStale(app, staleTime); err != nil {
|
|
|
|
level.Warn(sl.l).Log("msg", "Stale report failed", "err", err)
|
2017-05-11 13:43:43 +00:00
|
|
|
}
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
|
|
|
|
2017-05-10 15:59:02 +00:00
|
|
|
// Stop the scraping. May still write data and stale markers after it has
|
|
|
|
// returned. Cancel the context to stop all writes.
|
2016-02-22 15:46:55 +00:00
|
|
|
func (sl *scrapeLoop) stop() {
|
|
|
|
sl.cancel()
|
2017-05-10 15:59:02 +00:00
|
|
|
<-sl.stopped
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
|
|
|
|
2020-03-20 16:43:26 +00:00
|
|
|
func (sl *scrapeLoop) disableEndOfRunStalenessMarkers() {
|
|
|
|
sl.disabledEndOfRunStalenessMarkers = true
|
|
|
|
}
|
|
|
|
|
2020-01-22 12:13:47 +00:00
|
|
|
func (sl *scrapeLoop) getCache() *scrapeCache {
|
|
|
|
return sl.cache
|
|
|
|
}
|
|
|
|
|
2020-03-26 02:31:48 +00:00
|
|
|
type appendErrors struct {
|
2023-04-21 19:14:19 +00:00
|
|
|
numOutOfOrder int
|
|
|
|
numDuplicates int
|
|
|
|
numOutOfBounds int
|
|
|
|
numExemplarOutOfOrder int
|
2020-03-26 02:31:48 +00:00
|
|
|
}
|
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
|
2023-05-10 23:59:21 +00:00
|
|
|
p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms)
|
2022-02-08 10:01:37 +00:00
|
|
|
if err != nil {
|
2022-02-08 09:57:56 +00:00
|
|
|
level.Debug(sl.l).Log(
|
2022-02-08 10:01:37 +00:00
|
|
|
"msg", "Invalid content type on scrape, using prometheus parser as fallback.",
|
|
|
|
"content_type", contentType,
|
|
|
|
"err", err,
|
2022-02-08 09:57:56 +00:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2016-04-25 14:43:52 +00:00
|
|
|
var (
|
2022-08-31 13:50:05 +00:00
|
|
|
defTime = timestamp.FromTime(ts)
|
|
|
|
appErrs = appendErrors{}
|
|
|
|
sampleLimitErr error
|
2023-04-21 19:14:19 +00:00
|
|
|
bucketLimitErr error
|
2023-10-05 11:04:59 +00:00
|
|
|
lset labels.Labels // escapes to heap so hoisted out of loop
|
2022-08-31 13:50:05 +00:00
|
|
|
e exemplar.Exemplar // escapes to heap so hoisted out of loop
|
|
|
|
meta metadata.Metadata
|
|
|
|
metadataChanged bool
|
2016-04-25 14:43:52 +00:00
|
|
|
)
|
2016-02-22 15:46:55 +00:00
|
|
|
|
2023-11-16 14:07:37 +00:00
|
|
|
exemplars := make([]exemplar.Exemplar, 1)
|
|
|
|
|
2022-08-31 13:50:05 +00:00
|
|
|
// updateMetadata updates the current iteration's metadata object and the
|
|
|
|
// metadataChanged value if we have metadata in the scrape cache AND the
|
|
|
|
// labelset is for a new series or the metadata for this series has just
|
|
|
|
// changed. It returns a boolean based on whether the metadata was updated.
|
|
|
|
updateMetadata := func(lset labels.Labels, isNewSeries bool) bool {
|
|
|
|
if !sl.appendMetadataToWAL {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
sl.cache.metaMtx.Lock()
|
|
|
|
defer sl.cache.metaMtx.Unlock()
|
2022-12-20 16:54:07 +00:00
|
|
|
metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)]
|
2022-08-31 13:50:05 +00:00
|
|
|
if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
|
|
|
|
metadataChanged = true
|
|
|
|
meta.Type = metaEntry.Type
|
|
|
|
meta.Unit = metaEntry.Unit
|
|
|
|
meta.Help = metaEntry.Help
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2021-12-10 12:03:28 +00:00
|
|
|
// Take an appender with limits.
|
2024-01-17 15:58:54 +00:00
|
|
|
app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
|
2021-12-10 12:03:28 +00:00
|
|
|
|
2020-03-13 19:54:47 +00:00
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Only perform cache cleaning if the scrape was not empty.
|
|
|
|
// An empty scrape (usually) is used to indicate a failed scrape.
|
|
|
|
sl.cache.iterDone(len(b) > 0)
|
|
|
|
}()
|
|
|
|
|
2017-02-01 14:59:37 +00:00
|
|
|
loop:
|
2018-05-14 20:19:53 +00:00
|
|
|
for {
|
2020-03-26 02:31:48 +00:00
|
|
|
var (
|
2021-06-29 21:45:23 +00:00
|
|
|
et textparse.Entry
|
|
|
|
sampleAdded, isHistogram bool
|
|
|
|
met []byte
|
|
|
|
parsedTimestamp *int64
|
|
|
|
val float64
|
2021-11-12 18:07:41 +00:00
|
|
|
h *histogram.Histogram
|
2022-12-28 08:55:07 +00:00
|
|
|
fh *histogram.FloatHistogram
|
2020-03-26 02:31:48 +00:00
|
|
|
)
|
2018-05-14 20:19:53 +00:00
|
|
|
if et, err = p.Next(); err != nil {
|
2022-12-29 15:23:07 +00:00
|
|
|
if errors.Is(err, io.EOF) {
|
2018-05-14 20:19:53 +00:00
|
|
|
err = nil
|
|
|
|
}
|
|
|
|
break
|
|
|
|
}
|
2018-05-18 07:32:11 +00:00
|
|
|
switch et {
|
|
|
|
case textparse.EntryType:
|
|
|
|
sl.cache.setType(p.Type())
|
|
|
|
continue
|
|
|
|
case textparse.EntryHelp:
|
|
|
|
sl.cache.setHelp(p.Help())
|
2018-05-14 20:19:53 +00:00
|
|
|
continue
|
2018-10-05 16:11:16 +00:00
|
|
|
case textparse.EntryUnit:
|
|
|
|
sl.cache.setUnit(p.Unit())
|
|
|
|
continue
|
2018-05-18 07:32:11 +00:00
|
|
|
case textparse.EntryComment:
|
|
|
|
continue
|
2021-06-29 21:45:23 +00:00
|
|
|
case textparse.EntryHistogram:
|
|
|
|
isHistogram = true
|
2018-05-18 07:32:11 +00:00
|
|
|
default:
|
2018-05-14 20:19:53 +00:00
|
|
|
}
|
2017-01-30 16:30:28 +00:00
|
|
|
total++
|
|
|
|
|
2017-01-15 16:33:07 +00:00
|
|
|
t := defTime
|
2021-06-29 21:45:23 +00:00
|
|
|
if isHistogram {
|
2022-12-28 08:55:07 +00:00
|
|
|
met, parsedTimestamp, h, fh = p.Histogram()
|
2021-06-29 21:45:23 +00:00
|
|
|
} else {
|
|
|
|
met, parsedTimestamp, val = p.Series()
|
|
|
|
}
|
2019-03-15 10:04:15 +00:00
|
|
|
if !sl.honorTimestamps {
|
2021-06-29 21:45:23 +00:00
|
|
|
parsedTimestamp = nil
|
2019-03-15 10:04:15 +00:00
|
|
|
}
|
2021-06-29 21:45:23 +00:00
|
|
|
if parsedTimestamp != nil {
|
|
|
|
t = *parsedTimestamp
|
2017-01-15 16:33:07 +00:00
|
|
|
}
|
|
|
|
|
2022-08-31 13:50:05 +00:00
|
|
|
// Zero metadata out for current iteration until it's resolved.
|
|
|
|
meta = metadata.Metadata{}
|
|
|
|
metadataChanged = false
|
|
|
|
|
2022-12-20 16:54:07 +00:00
|
|
|
if sl.cache.getDropped(met) {
|
2017-09-08 12:34:45 +00:00
|
|
|
continue
|
|
|
|
}
|
2022-12-20 16:54:07 +00:00
|
|
|
ce, ok := sl.cache.get(met)
|
2021-02-18 12:07:00 +00:00
|
|
|
var (
|
2021-11-06 10:10:04 +00:00
|
|
|
ref storage.SeriesRef
|
2021-02-18 12:07:00 +00:00
|
|
|
hash uint64
|
|
|
|
)
|
2020-03-26 02:31:48 +00:00
|
|
|
|
2017-01-15 16:33:07 +00:00
|
|
|
if ok {
|
2021-02-18 12:07:00 +00:00
|
|
|
ref = ce.ref
|
|
|
|
lset = ce.lset
|
2022-08-31 13:50:05 +00:00
|
|
|
|
|
|
|
// Update metadata only if it changed in the current iteration.
|
|
|
|
updateMetadata(lset, false)
|
2021-02-18 12:07:00 +00:00
|
|
|
} else {
|
2023-01-04 12:05:42 +00:00
|
|
|
p.Metric(&lset)
|
2021-02-18 12:07:00 +00:00
|
|
|
hash = lset.Hash()
|
2017-09-15 09:08:51 +00:00
|
|
|
|
|
|
|
// Hash label set as it is seen local to the target. Then add target labels,
|
|
|
|
// apply relabeling, and store the final label set.
|
|
|
|
lset = sl.sampleMutator(lset)
|
|
|
|
|
2022-03-09 22:26:24 +00:00
|
|
|
// The label set may be set to empty to indicate dropping.
|
|
|
|
if lset.IsEmpty() {
|
2023-01-04 12:05:42 +00:00
|
|
|
sl.cache.addDropped(met)
|
2017-09-15 09:08:51 +00:00
|
|
|
continue
|
2017-06-26 06:56:40 +00:00
|
|
|
}
|
2017-01-30 16:30:28 +00:00
|
|
|
|
2020-03-02 07:18:05 +00:00
|
|
|
if !lset.Has(labels.MetricName) {
|
|
|
|
err = errNameLabelMandatory
|
|
|
|
break loop
|
|
|
|
}
|
2022-12-08 03:09:43 +00:00
|
|
|
if !lset.IsValid() {
|
|
|
|
err = fmt.Errorf("invalid metric name or label names: %s", lset.String())
|
|
|
|
break loop
|
|
|
|
}
|
2021-05-06 08:56:21 +00:00
|
|
|
|
|
|
|
// If any label limit is exceeded, the scrape should fail.
|
|
|
|
if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
|
2021-05-06 08:56:21 +00:00
|
|
|
break loop
|
|
|
|
}
|
2022-08-31 13:50:05 +00:00
|
|
|
|
|
|
|
// Append metadata for new series if they were present.
|
|
|
|
updateMetadata(lset, true)
|
2021-02-18 12:07:00 +00:00
|
|
|
}
|
2020-03-02 07:18:05 +00:00
|
|
|
|
2023-12-11 08:43:42 +00:00
|
|
|
if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil {
|
|
|
|
ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
|
|
|
|
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
|
|
|
|
// CT is an experimental feature. For now, we don't need to fail the
|
|
|
|
// scrape on errors updating the created timestamp; we just log at debug level.
|
|
|
|
level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-29 21:45:23 +00:00
|
|
|
if isHistogram {
|
2022-08-25 15:07:41 +00:00
|
|
|
if h != nil {
|
2022-12-28 08:55:07 +00:00
|
|
|
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
|
|
|
|
} else {
|
|
|
|
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
|
2022-08-25 15:07:41 +00:00
|
|
|
}
|
2021-06-29 21:45:23 +00:00
|
|
|
} else {
|
|
|
|
ref, err = app.Append(ref, lset, t, val)
|
|
|
|
}
|
2023-04-21 19:14:19 +00:00
|
|
|
sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
|
2021-02-18 12:07:00 +00:00
|
|
|
if err != nil {
|
2023-11-01 19:06:46 +00:00
|
|
|
if !errors.Is(err, storage.ErrNotFound) {
|
2021-02-18 12:07:00 +00:00
|
|
|
level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
2021-02-18 12:07:00 +00:00
|
|
|
break loop
|
|
|
|
}
|
2020-03-26 02:31:48 +00:00
|
|
|
|
2021-02-18 12:07:00 +00:00
|
|
|
if !ok {
|
2023-10-31 20:58:42 +00:00
|
|
|
if parsedTimestamp == nil || sl.trackTimestampsStaleness {
|
2017-04-28 15:36:36 +00:00
|
|
|
// Bypass staleness logic if there is an explicit timestamp.
|
2017-06-26 06:56:40 +00:00
|
|
|
sl.cache.trackStaleness(hash, lset)
|
2017-04-28 15:36:36 +00:00
|
|
|
}
|
2022-12-20 16:54:07 +00:00
|
|
|
sl.cache.addRef(met, ref, lset, hash)
|
2023-04-21 19:14:19 +00:00
|
|
|
if sampleAdded && sampleLimitErr == nil && bucketLimitErr == nil {
|
2020-03-26 02:31:48 +00:00
|
|
|
seriesAdded++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-04 15:00:37 +00:00
|
|
|
// Increment added even if there's an error so we correctly report the
|
2020-07-11 13:37:13 +00:00
|
|
|
// number of samples remaining after relabeling.
|
2020-06-04 15:00:37 +00:00
|
|
|
added++
|
2023-11-16 14:07:37 +00:00
|
|
|
exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
|
2023-07-13 12:16:10 +00:00
|
|
|
for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
|
2021-03-16 09:47:45 +00:00
|
|
|
if !e.HasTs {
|
2023-11-16 14:07:37 +00:00
|
|
|
if isHistogram {
|
|
|
|
// We drop exemplars for native histograms if they don't have a timestamp.
|
|
|
|
// Missing timestamps are deliberately not supported as we want to start
|
|
|
|
// enforcing timestamps for exemplars as otherwise proper deduplication
|
|
|
|
// is inefficient and purely based on heuristics: we cannot distinguish
|
|
|
|
// between repeated exemplars and new instances with the same values.
|
|
|
|
// This is done silently without logs as it is not an error but out of spec.
|
|
|
|
// This does not affect classic histograms so that behaviour is unchanged.
|
|
|
|
e = exemplar.Exemplar{} // Reset for next time round loop.
|
|
|
|
continue
|
|
|
|
}
|
2021-03-16 09:47:45 +00:00
|
|
|
e.Ts = t
|
|
|
|
}
|
2023-11-16 14:07:37 +00:00
|
|
|
exemplars = append(exemplars, e)
|
|
|
|
e = exemplar.Exemplar{} // Reset for next time round loop.
|
|
|
|
}
|
2023-11-24 14:38:35 +00:00
|
|
|
// Sort so that checking for duplicates / out of order is more efficient during validation.
|
|
|
|
slices.SortFunc(exemplars, exemplar.Compare)
|
2023-11-16 14:07:37 +00:00
|
|
|
outOfOrderExemplars := 0
|
|
|
|
for _, e := range exemplars {
|
2021-03-16 09:47:45 +00:00
|
|
|
_, exemplarErr := app.AppendExemplar(ref, lset, e)
|
2023-11-16 14:07:37 +00:00
|
|
|
switch {
|
|
|
|
case exemplarErr == nil:
|
|
|
|
// Do nothing.
|
|
|
|
case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar):
|
|
|
|
outOfOrderExemplars++
|
|
|
|
default:
|
2021-03-16 09:47:45 +00:00
|
|
|
// Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors.
|
|
|
|
level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
|
|
|
|
}
|
2023-11-16 14:07:37 +00:00
|
|
|
}
|
|
|
|
if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) {
|
|
|
|
// Only report out of order exemplars if all are out of order, otherwise this was a partial update
|
|
|
|
// to some existing set of exemplars.
|
|
|
|
appErrs.numExemplarOutOfOrder += outOfOrderExemplars
|
|
|
|
level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1]))
|
|
|
|
sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
|
2021-03-16 09:47:45 +00:00
|
|
|
}
|
|
|
|
|
2022-08-31 13:50:05 +00:00
|
|
|
if sl.appendMetadataToWAL && metadataChanged {
|
|
|
|
if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil {
|
|
|
|
// No need to fail the scrape on errors appending metadata.
|
|
|
|
level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr)
|
|
|
|
}
|
|
|
|
}
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
2018-01-09 15:43:28 +00:00
|
|
|
if sampleLimitErr != nil {
|
2018-05-14 20:19:53 +00:00
|
|
|
if err == nil {
|
|
|
|
err = sampleLimitErr
|
|
|
|
}
|
2018-01-09 15:43:28 +00:00
|
|
|
// We only want to increment this once per scrape, so this is Inc'd outside the loop.
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetScrapeSampleLimit.Inc()
|
2017-05-29 13:08:55 +00:00
|
|
|
}
|
2023-04-21 19:14:19 +00:00
|
|
|
if bucketLimitErr != nil {
|
|
|
|
if err == nil {
|
2023-05-04 18:29:50 +00:00
|
|
|
err = bucketLimitErr // If sample limit is hit, that error takes precedence.
|
2023-04-21 19:14:19 +00:00
|
|
|
}
|
|
|
|
// We only want to increment this once per scrape, so this is Inc'd outside the loop.
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc()
|
2023-04-21 19:14:19 +00:00
|
|
|
}
|
2020-03-26 02:31:48 +00:00
|
|
|
if appErrs.numOutOfOrder > 0 {
|
|
|
|
level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder)
|
2017-05-16 12:30:40 +00:00
|
|
|
}
|
2020-03-26 02:31:48 +00:00
|
|
|
if appErrs.numDuplicates > 0 {
|
|
|
|
level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates)
|
2017-05-16 12:30:40 +00:00
|
|
|
}
|
2020-03-26 02:31:48 +00:00
|
|
|
if appErrs.numOutOfBounds > 0 {
|
|
|
|
level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds)
|
2017-07-04 09:24:13 +00:00
|
|
|
}
|
2021-03-16 09:47:45 +00:00
|
|
|
if appErrs.numExemplarOutOfOrder > 0 {
|
|
|
|
level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder)
|
|
|
|
}
|
2017-04-13 17:07:23 +00:00
|
|
|
if err == nil {
|
2017-05-26 08:44:48 +00:00
|
|
|
sl.cache.forEachStale(func(lset labels.Labels) bool {
|
|
|
|
// Series no longer exposed, mark it stale.
|
2021-02-18 12:07:00 +00:00
|
|
|
_, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN))
|
2023-11-01 19:06:46 +00:00
|
|
|
switch {
|
|
|
|
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
|
2017-05-26 08:44:48 +00:00
|
|
|
// Do not count these in logging, as this is expected if a target
|
|
|
|
// goes away and comes back again with a new scrape loop.
|
|
|
|
err = nil
|
2017-04-13 17:07:23 +00:00
|
|
|
}
|
2017-05-26 08:44:48 +00:00
|
|
|
return err == nil
|
|
|
|
})
|
2017-04-13 17:07:23 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
return
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
|
|
|
|
2020-03-26 02:31:48 +00:00
|
|
|
// Adds samples to the appender, checking the error, and then returns the # of samples added,
|
2023-04-21 19:14:19 +00:00
|
|
|
// whether the caller should continue to process more samples, and any sample or bucket limit errors.
|
|
|
|
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
|
2023-11-01 19:06:46 +00:00
|
|
|
switch {
|
|
|
|
case err == nil:
|
2023-10-31 20:58:42 +00:00
|
|
|
if (tp == nil || sl.trackTimestampsStaleness) && ce != nil {
|
2020-03-26 02:31:48 +00:00
|
|
|
sl.cache.trackStaleness(ce.hash, ce.lset)
|
|
|
|
}
|
|
|
|
return true, nil
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, storage.ErrNotFound):
|
2020-03-26 02:31:48 +00:00
|
|
|
return false, storage.ErrNotFound
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, storage.ErrOutOfOrderSample):
|
2020-03-26 02:31:48 +00:00
|
|
|
appErrs.numOutOfOrder++
|
|
|
|
level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met))
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetScrapeSampleOutOfOrder.Inc()
|
2020-03-26 02:31:48 +00:00
|
|
|
return false, nil
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
|
2020-03-26 02:31:48 +00:00
|
|
|
appErrs.numDuplicates++
|
|
|
|
level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met))
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetScrapeSampleDuplicate.Inc()
|
2020-03-26 02:31:48 +00:00
|
|
|
return false, nil
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, storage.ErrOutOfBounds):
|
2020-03-26 02:31:48 +00:00
|
|
|
appErrs.numOutOfBounds++
|
|
|
|
level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met))
|
2023-09-22 16:47:44 +00:00
|
|
|
sl.metrics.targetScrapeSampleOutOfBounds.Inc()
|
2020-03-26 02:31:48 +00:00
|
|
|
return false, nil
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, errSampleLimit):
|
2020-03-26 02:31:48 +00:00
|
|
|
// Keep on parsing output if we hit the limit, so we report the correct
|
|
|
|
// total number of samples scraped.
|
|
|
|
*sampleLimitErr = err
|
|
|
|
return false, nil
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, errBucketLimit):
|
2023-04-21 19:14:19 +00:00
|
|
|
// Keep on parsing output if we hit the limit, so we report the correct
|
|
|
|
// total number of samples scraped.
|
|
|
|
*bucketLimitErr = err
|
|
|
|
return false, nil
|
2020-03-26 02:31:48 +00:00
|
|
|
default:
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-15 09:08:51 +00:00
|
|
|
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
|
|
|
|
// with scraped metrics in the cache.
|
2022-12-20 16:54:07 +00:00
|
|
|
var (
|
|
|
|
scrapeHealthMetricName = []byte("up" + "\xff")
|
|
|
|
scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff")
|
|
|
|
scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff")
|
|
|
|
samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff")
|
|
|
|
scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff")
|
|
|
|
scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff")
|
|
|
|
scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff")
|
|
|
|
scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff")
|
2017-09-15 09:08:51 +00:00
|
|
|
)
|
|
|
|
|
2021-10-24 21:45:31 +00:00
|
|
|
func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
|
2020-03-13 19:54:47 +00:00
|
|
|
sl.scraper.Report(start, duration, scrapeErr)
|
2016-02-22 15:46:55 +00:00
|
|
|
|
2017-01-15 16:33:07 +00:00
|
|
|
ts := timestamp.FromTime(start)
|
2016-02-22 15:46:55 +00:00
|
|
|
|
2016-12-29 08:27:30 +00:00
|
|
|
var health float64
|
2020-03-13 19:54:47 +00:00
|
|
|
if scrapeErr == nil {
|
2016-02-22 15:46:55 +00:00
|
|
|
health = 1
|
|
|
|
}
|
2017-01-13 13:48:01 +00:00
|
|
|
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health); err != nil {
|
|
|
|
return
|
2017-01-13 13:48:01 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds()); err != nil {
|
|
|
|
return
|
2016-05-19 14:22:49 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped)); err != nil {
|
|
|
|
return
|
2016-10-26 16:43:01 +00:00
|
|
|
}
|
2020-06-04 15:00:37 +00:00
|
|
|
if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added)); err != nil {
|
2020-03-13 19:54:47 +00:00
|
|
|
return
|
2017-01-15 16:33:07 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded)); err != nil {
|
|
|
|
return
|
2019-05-08 21:24:00 +00:00
|
|
|
}
|
2021-10-24 21:45:31 +00:00
|
|
|
if sl.reportExtraMetrics {
|
2021-10-24 14:38:21 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds()); err != nil {
|
2021-08-24 12:31:14 +00:00
|
|
|
return
|
|
|
|
}
|
2021-09-03 13:37:42 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit)); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2021-10-24 21:45:31 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes)); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2021-08-24 12:31:14 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
return
|
2017-01-15 16:33:07 +00:00
|
|
|
}
|
|
|
|
|
2020-07-16 11:53:39 +00:00
|
|
|
func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) {
|
2017-05-11 13:43:43 +00:00
|
|
|
ts := timestamp.FromTime(start)
|
2017-09-08 12:34:45 +00:00
|
|
|
|
2017-05-11 13:43:43 +00:00
|
|
|
stale := math.Float64frombits(value.StaleNaN)
|
|
|
|
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale); err != nil {
|
|
|
|
return
|
2017-05-11 13:43:43 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale); err != nil {
|
|
|
|
return
|
2017-05-11 13:43:43 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale); err != nil {
|
|
|
|
return
|
2017-05-11 13:43:43 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale); err != nil {
|
|
|
|
return
|
2017-05-11 13:43:43 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale); err != nil {
|
|
|
|
return
|
2019-05-08 21:24:00 +00:00
|
|
|
}
|
2021-10-24 21:45:31 +00:00
|
|
|
if sl.reportExtraMetrics {
|
2021-08-24 12:31:14 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2021-09-03 13:37:42 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2021-10-24 21:45:31 +00:00
|
|
|
if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2021-08-24 12:31:14 +00:00
|
|
|
}
|
2020-03-13 19:54:47 +00:00
|
|
|
return
|
2017-05-11 13:43:43 +00:00
|
|
|
}
|
|
|
|
|
2022-12-20 16:54:07 +00:00
|
|
|
func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64) error {
|
2017-09-15 09:08:51 +00:00
|
|
|
ce, ok := sl.cache.get(s)
|
2021-11-06 10:10:04 +00:00
|
|
|
var ref storage.SeriesRef
|
2021-02-18 12:07:00 +00:00
|
|
|
var lset labels.Labels
|
2017-01-15 16:33:07 +00:00
|
|
|
if ok {
|
2021-02-18 12:07:00 +00:00
|
|
|
ref = ce.ref
|
|
|
|
lset = ce.lset
|
|
|
|
} else {
|
2022-03-09 22:26:24 +00:00
|
|
|
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
|
|
|
|
// with scraped metrics in the cache.
|
|
|
|
// We have to drop it when building the actual metric.
|
2022-12-20 16:54:07 +00:00
|
|
|
lset = labels.FromStrings(labels.MetricName, string(s[:len(s)-1]))
|
2021-02-18 12:07:00 +00:00
|
|
|
lset = sl.reportSampleMutator(lset)
|
2017-01-13 13:48:01 +00:00
|
|
|
}
|
2017-09-08 12:34:45 +00:00
|
|
|
|
2021-02-18 12:07:00 +00:00
|
|
|
ref, err := app.Append(ref, lset, t, v)
|
2023-11-01 19:06:46 +00:00
|
|
|
switch {
|
|
|
|
case err == nil:
|
2021-02-18 12:07:00 +00:00
|
|
|
if !ok {
|
|
|
|
sl.cache.addRef(s, ref, lset, lset.Hash())
|
|
|
|
}
|
2017-05-11 13:43:43 +00:00
|
|
|
return nil
|
2023-11-01 19:06:46 +00:00
|
|
|
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
|
2021-02-18 12:07:00 +00:00
|
|
|
// Do not log here, as this is expected if a target goes away and comes back
|
|
|
|
// again with a new scrape loop.
|
2017-05-11 13:43:43 +00:00
|
|
|
return nil
|
|
|
|
default:
|
2017-01-15 16:33:07 +00:00
|
|
|
return err
|
2017-01-13 13:48:01 +00:00
|
|
|
}
|
2016-02-22 15:46:55 +00:00
|
|
|
}
|
2020-01-22 12:13:47 +00:00
|
|
|
|
|
|
|
// zeroConfig returns a new scrape config that only contains configuration items
|
|
|
|
// that alter metrics.
|
|
|
|
func zeroConfig(c *config.ScrapeConfig) *config.ScrapeConfig {
|
|
|
|
z := *c
|
|
|
|
// We zero out the fields that for sure don't affect scrape.
|
|
|
|
z.ScrapeInterval = 0
|
|
|
|
z.ScrapeTimeout = 0
|
|
|
|
z.SampleLimit = 0
|
|
|
|
z.HTTPClientConfig = config_util.HTTPClientConfig{}
|
|
|
|
return &z
|
|
|
|
}
|
|
|
|
|
2020-03-02 08:21:24 +00:00
|
|
|
// reusableCache compares two scrape configs and tells whether the cache is still
|
2020-01-22 12:13:47 +00:00
|
|
|
// valid.
|
|
|
|
func reusableCache(r, l *config.ScrapeConfig) bool {
|
|
|
|
if r == nil || l == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return reflect.DeepEqual(zeroConfig(r), zeroConfig(l))
|
|
|
|
}
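As a minimal, hypothetical sketch of what this means in practice (the job name, interval values and metrics path are invented; only the fields relevant to the comparison are set), two configs that differ solely in a field zeroConfig clears still share their caches, while any other change invalidates them:

// cacheReuseExample illustrates reusableCache; it is not part of this file.
func cacheReuseExample() {
	oldCfg := &config.ScrapeConfig{JobName: "node", ScrapeInterval: model.Duration(15 * time.Second)}
	newCfg := &config.ScrapeConfig{JobName: "node", ScrapeInterval: model.Duration(30 * time.Second)}

	// Only ScrapeInterval differs, and zeroConfig clears it: the cache is kept.
	fmt.Println(reusableCache(oldCfg, newCfg)) // true

	// MetricsPath is not cleared by zeroConfig, so the configs now really differ.
	newCfg.MetricsPath = "/other-metrics"
	fmt.Println(reusableCache(oldCfg, newCfg)) // false
}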
|
2022-03-16 08:45:15 +00:00
|
|
|
|
|
|
|
// ctxKey is a dedicated type for keys of context-embedded values propagated
|
|
|
|
// with the scrape context.
|
|
|
|
type ctxKey int
|
|
|
|
|
|
|
|
// Valid ctxKey values.
|
|
|
|
const (
|
|
|
|
ctxKeyMetadata ctxKey = iota + 1
|
2022-03-24 15:53:04 +00:00
|
|
|
ctxKeyTarget
|
2022-03-16 08:45:15 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func ContextWithMetricMetadataStore(ctx context.Context, s MetricMetadataStore) context.Context {
|
|
|
|
return context.WithValue(ctx, ctxKeyMetadata, s)
|
|
|
|
}
|
|
|
|
|
|
|
|
func MetricMetadataStoreFromContext(ctx context.Context) (MetricMetadataStore, bool) {
|
|
|
|
s, ok := ctx.Value(ctxKeyMetadata).(MetricMetadataStore)
|
|
|
|
return s, ok
|
|
|
|
}
|
2022-03-24 15:53:04 +00:00
|
|
|
|
|
|
|
func ContextWithTarget(ctx context.Context, t *Target) context.Context {
|
|
|
|
return context.WithValue(ctx, ctxKeyTarget, t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TargetFromContext(ctx context.Context) (*Target, bool) {
|
|
|
|
t, ok := ctx.Value(ctxKeyTarget).(*Target)
|
|
|
|
return t, ok
|
|
|
|
}
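A brief, hypothetical sketch of how a downstream consumer would use these helpers together (metadataForSample and the metric name are invented for illustration; in Prometheus itself the values are attached to the appender context in newScrapeLoop when passMetadataInContext is set):

// metadataForSample resolves metadata for a scraped sample from the values
// embedded in the scrape context, if they are present.
func metadataForSample(ctx context.Context, metricName string) (MetricMetadata, bool) {
	if _, ok := TargetFromContext(ctx); !ok {
		return MetricMetadata{}, false // no target attached to this context
	}
	store, ok := MetricMetadataStoreFromContext(ctx)
	if !ok {
		return MetricMetadata{}, false
	}
	return store.GetMetadata(metricName)
}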
|
2024-01-17 15:58:54 +00:00
|
|
|
|
|
|
|
func pickSchema(bucketFactor float64) int32 {
|
|
|
|
if bucketFactor <= 1 {
|
|
|
|
bucketFactor = 1.00271
|
|
|
|
}
|
|
|
|
floor := math.Floor(-math.Log2(math.Log2(bucketFactor)))
|
|
|
|
switch {
|
|
|
|
case floor >= float64(nativeHistogramMaxSchema):
|
|
|
|
return nativeHistogramMaxSchema
|
|
|
|
case floor <= float64(nativeHistogramMinSchema):
|
|
|
|
return nativeHistogramMinSchema
|
|
|
|
default:
|
|
|
|
return int32(floor)
|
|
|
|
}
|
|
|
|
}
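As a worked example of the formula above (the input value is invented, and the result is clamped by the nativeHistogramMinSchema/nativeHistogramMaxSchema bounds): a requested bucketFactor of 1.1 gives math.Log2(1.1) ≈ 0.1375, -math.Log2(0.1375) ≈ 2.86, and math.Floor of that is 2, so pickSchema returns schema 2. A bucketFactor at or below 1 falls back to 1.00271, which works out to a floor of roughly 8 and is therefore clamped to nativeHistogramMaxSchema.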
|