// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package tsdb implements a time series storage for float64 sample data.
package tsdb

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log/slog"
	"math"
	"math/rand"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/oklog/ulid"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/promslog"
	"go.uber.org/atomic"
	"golang.org/x/sync/errgroup"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
	_ "github.com/prometheus/prometheus/tsdb/goversion" // Load the package into main to make sure minimum Go version is met.
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

const (
	// DefaultBlockDuration in milliseconds.
	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)

	// DefaultCompactionDelayMaxPercent in percentage.
	DefaultCompactionDelayMaxPercent = 10

	// Block dir suffixes to make deletion and creation operations atomic.
	// We decided to use suffixes instead of writing meta.json as the last step (or deleting it as the first),
	// because in the error case you can still recover meta.json from the block content within the local TSDB dir.
	// TODO(bwplotka): TSDB can end up with various .tmp files (e.g. meta.json.tmp, WAL or segment tmp files). Think
	// about removing those too on start to save space. Currently only blocks tmp dirs are removed.
	tmpForDeletionBlockDirSuffix = ".tmp-for-deletion"
	tmpForCreationBlockDirSuffix = ".tmp-for-creation"

	// Pre-2.21 tmp dir suffix, used in clean-up functions.
	tmpLegacy = ".tmp"
)

// ErrNotReady is returned if the underlying storage is not ready yet.
var ErrNotReady = errors.New("TSDB not ready")

// DefaultOptions used for the DB. They are reasonable for setups using
// millisecond precision timestamps.
func DefaultOptions() *Options {
	return &Options{
		WALSegmentSize:              wlog.DefaultSegmentSize,
		MaxBlockChunkSegmentSize:    chunks.DefaultChunkSegmentSize,
		RetentionDuration:           int64(15 * 24 * time.Hour / time.Millisecond),
		MinBlockDuration:            DefaultBlockDuration,
		MaxBlockDuration:            DefaultBlockDuration,
		NoLockfile:                  false,
		SamplesPerChunk:             DefaultSamplesPerChunk,
		WALCompression:              wlog.CompressionNone,
		StripeSize:                  DefaultStripeSize,
		HeadChunksWriteBufferSize:   chunks.DefaultWriteBufferSize,
		IsolationDisabled:           defaultIsolationDisabled,
		HeadChunksWriteQueueSize:    chunks.DefaultWriteQueueSize,
		OutOfOrderCapMax:            DefaultOutOfOrderCapMax,
		EnableOverlappingCompaction: true,
		EnableSharding:              false,
		EnableDelayedCompaction:     false,
		CompactionDelayMaxPercent:   DefaultCompactionDelayMaxPercent,
		CompactionDelay:             time.Duration(0),
		PostingsDecoderFactory:      DefaultPostingsDecoderFactory,
	}
}
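
// The following is a hedged usage sketch, not part of the original file: it
// shows how a caller outside this package might open a DB with the defaults
// above, overriding only the retention. The "data" directory and the use of
// log.Fatal are assumptions for illustration.
//
//	opts := tsdb.DefaultOptions()
//	opts.RetentionDuration = int64(30 * 24 * time.Hour / time.Millisecond)
//	db, err := tsdb.Open("data", nil, prometheus.DefaultRegisterer, opts, tsdb.NewDBStats())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()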

// Options of the DB storage.
type Options struct {
	// Segments (wal files) max size.
	// WALSegmentSize = 0, segment size is default size.
	// WALSegmentSize > 0, segment size is WALSegmentSize.
	// WALSegmentSize < 0, wal is disabled.
	WALSegmentSize int

	// MaxBlockChunkSegmentSize is the max size of block chunk segment files.
	// MaxBlockChunkSegmentSize = 0, chunk segment size is default size.
	// MaxBlockChunkSegmentSize > 0, chunk segment size is MaxBlockChunkSegmentSize.
	MaxBlockChunkSegmentSize int64

	// Duration of persisted data to keep.
	// Unit agnostic as long as unit is consistent with MinBlockDuration and MaxBlockDuration.
	// Typically it is in milliseconds.
	RetentionDuration int64

	// Maximum number of bytes in blocks to be retained.
	// 0 or less means disabled.
	// NOTE: For proper storage calculations you need to consider
	// the size of the WAL folder, which is not added when calculating
	// the current size of the database.
	MaxBytes int64

	// NoLockfile disables creation and consideration of a lock file.
	NoLockfile bool

	// WALCompression configures the compression type to use on records in the WAL.
	WALCompression wlog.CompressionType

	// Maximum number of CPUs that can simultaneously process WAL replay.
	// If it is <=0, then GOMAXPROCS is used.
	WALReplayConcurrency int

	// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
	StripeSize int

	// The timestamp range of head blocks after which they get persisted.
	// It's the minimum duration of any persisted block.
	// Unit agnostic as long as unit is consistent with RetentionDuration and MaxBlockDuration.
	// Typically it is in milliseconds.
	MinBlockDuration int64

	// The maximum timestamp range of compacted blocks.
	// Unit agnostic as long as unit is consistent with MinBlockDuration and RetentionDuration.
	// Typically it is in milliseconds.
	MaxBlockDuration int64

	// HeadChunksWriteBufferSize configures the write buffer size used by the head chunks mapper.
	HeadChunksWriteBufferSize int

	// HeadChunksWriteQueueSize configures the size of the chunk write queue used in the head chunks mapper.
	HeadChunksWriteQueueSize int

	// SamplesPerChunk configures the target number of samples per chunk.
	SamplesPerChunk int

	// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series.
	// It is always a no-op in Prometheus and mainly meant for external users who import TSDB.
	SeriesLifecycleCallback SeriesLifecycleCallback

	// BlocksToDelete is a function which returns the blocks which can be deleted.
	// It is always the default time and size based retention in Prometheus and
	// mainly meant for external users who import TSDB.
	BlocksToDelete BlocksToDeleteFunc

	// EnableExemplarStorage enables the in-memory exemplar storage.
	EnableExemplarStorage bool

	// EnableMemorySnapshotOnShutdown enables the snapshot of in-memory chunks on shutdown. This makes restarts faster.
	EnableMemorySnapshotOnShutdown bool

	// MaxExemplars sets the size, in # of exemplars stored, of the single circular buffer used to store exemplars in memory.
	// See tsdb/exemplar.go, specifically the CircularExemplarStorage struct and its constructor NewCircularExemplarStorage.
	MaxExemplars int64

	// IsolationDisabled disables isolation between reads and in-flight appends.
	IsolationDisabled bool

	// EnableNativeHistograms enables the ingestion of native histograms.
	EnableNativeHistograms bool

	// EnableOOONativeHistograms enables the ingestion of OOO native histograms.
	// It will only take effect if EnableNativeHistograms is set to true and the
	// OutOfOrderTimeWindow is > 0. This flag will be removed after testing of
	// OOO Native Histogram ingestion is complete.
	EnableOOONativeHistograms bool

	// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
	// This can change during run-time, so this value from here should only be used
	// while initialising.
	OutOfOrderTimeWindow int64

	// OutOfOrderCapMax is maximum capacity for OOO chunks (in samples).
	// If it is <=0, the default value is assumed.
	OutOfOrderCapMax int64

	// Compaction of overlapping blocks is allowed if EnableOverlappingCompaction is true.
	// This is an optional flag for overlapping blocks.
	// The reason why this flag exists is that there are various users of the TSDB
	// that do not want vertical compaction happening at ingest time. Instead,
	// they'd rather keep overlapping blocks and let another component do the overlapping compaction later.
	EnableOverlappingCompaction bool

	// EnableSharding enables query sharding support in TSDB.
	EnableSharding bool

	// EnableDelayedCompaction, when set to true, assigns a random value to CompactionDelay during DB opening.
	// When set to false, delayed compaction is disabled, unless CompactionDelay is set directly.
	EnableDelayedCompaction bool
	// CompactionDelay delays the start time of auto compactions.
	// It can be increased by up to one minute if the DB does not commit too often.
	CompactionDelay time.Duration
	// CompactionDelayMaxPercent is the upper limit for CompactionDelay, specified as a percentage of the head chunk range.
	CompactionDelayMaxPercent int

	// NewCompactorFunc is a function that returns a TSDB compactor.
	NewCompactorFunc NewCompactorFunc

	// BlockQuerierFunc is a function to return storage.Querier from a BlockReader.
	BlockQuerierFunc BlockQuerierFunc

	// BlockChunkQuerierFunc is a function to return storage.ChunkQuerier from a BlockReader.
	BlockChunkQuerierFunc BlockChunkQuerierFunc

	// PostingsDecoderFactory allows users to customize postings decoders based on BlockMeta.
	// By default, DefaultPostingsDecoderFactory will be used to create a raw postings decoder.
	PostingsDecoderFactory PostingsDecoderFactory
}
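
// A hedged sketch, not from the original file, of enabling out-of-order
// ingestion via the Options above; the one-hour window and capacity of 64
// samples are arbitrary assumptions.
//
//	opts := tsdb.DefaultOptions()
//	opts.OutOfOrderTimeWindow = int64(time.Hour / time.Millisecond)
//	opts.OutOfOrderCapMax = 64
//	db, err := tsdb.Open("data", nil, nil, opts, tsdb.NewDBStats())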

type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error)

type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}

type BlockQuerierFunc func(b BlockReader, mint, maxt int64) (storage.Querier, error)

type BlockChunkQuerierFunc func(b BlockReader, mint, maxt int64) (storage.ChunkQuerier, error)
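
// keepAllBlocks is a hedged illustration, not part of the original file, of a
// custom BlocksToDeleteFunc for external TSDB users: it marks no blocks for
// deletion, effectively disabling time- and size-based retention.
func keepAllBlocks(_ []*Block) map[ulid.ULID]struct{} {
	// Returning an empty set means no block is ever selected for deletion.
	return map[ulid.ULID]struct{}{}
}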

// DB handles reads and writes of time series falling into
// a hashed partition of a series database.
type DB struct {
	dir    string
	locker *tsdbutil.DirLocker

	logger         *slog.Logger
	metrics        *dbMetrics
	opts           *Options
	chunkPool      chunkenc.Pool
	compactor      Compactor
	blocksToDelete BlocksToDeleteFunc

	// mtx must be held when modifying the general block layout or lastGarbageCollectedMmapRef.
	mtx    sync.RWMutex
	blocks []*Block

	// The last OOO chunk that was compacted and written to disk. New queriers must not read chunks less
	// than or equal to this reference, as these chunks could be garbage collected at any time.
	lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef

	head *Head

	compactc chan struct{}
	donec    chan struct{}
	stopc    chan struct{}

	// cmtx ensures that compactions and deletions don't run simultaneously.
	cmtx sync.Mutex

	// autoCompactMtx ensures that no compaction gets triggered while
	// changing the autoCompact var.
	autoCompactMtx sync.Mutex
	autoCompact    bool

	// Cancel a running compaction when a shutdown is initiated.
	compactCancel context.CancelFunc

	// timeWhenCompactionDelayStarted helps delay the compactions start time.
	timeWhenCompactionDelayStarted time.Time

	// oooWasEnabled is true if out of order support was enabled at least one time
	// during the time TSDB was up. In which case we need to keep supporting
	// out-of-order compaction and vertical queries.
	oooWasEnabled atomic.Bool

	writeNotified wlog.WriteNotified

	registerer prometheus.Registerer

	blockQuerierFunc      BlockQuerierFunc
	blockChunkQuerierFunc BlockChunkQuerierFunc
}

type dbMetrics struct {
	loadedBlocks         prometheus.GaugeFunc
	symbolTableSize      prometheus.GaugeFunc
	reloads              prometheus.Counter
	reloadsFailed        prometheus.Counter
	compactionsFailed    prometheus.Counter
	compactionsTriggered prometheus.Counter
	compactionsSkipped   prometheus.Counter
	sizeRetentionCount   prometheus.Counter
	timeRetentionCount   prometheus.Counter
	startTime            prometheus.GaugeFunc
	tombCleanTimer       prometheus.Histogram
	blocksBytes          prometheus.Gauge
	maxBytes             prometheus.Gauge
	retentionDuration    prometheus.Gauge
}

func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
	m := &dbMetrics{}

	m.loadedBlocks = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_blocks_loaded",
		Help: "Number of currently loaded data blocks",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		return float64(len(db.blocks))
	})
	m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_symbol_table_size_bytes",
		Help: "Size of symbol table in memory for loaded blocks",
	}, func() float64 {
		db.mtx.RLock()
		blocks := db.blocks
		db.mtx.RUnlock()
		symTblSize := uint64(0)
		for _, b := range blocks {
			symTblSize += b.GetSymbolTableSize()
		}
		return float64(symTblSize)
	})
	m.reloads = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_reloads_total",
		Help: "Number of times the database reloaded block data from disk.",
	})
	m.reloadsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_reloads_failures_total",
		Help: "Number of times the database failed to reloadBlocks block data from disk.",
	})
	m.compactionsTriggered = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_triggered_total",
		Help: "Total number of triggered compactions for the partition.",
	})
	m.compactionsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_failed_total",
		Help: "Total number of compactions that failed for the partition.",
	})
	m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_time_retentions_total",
		Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.",
	})
	m.compactionsSkipped = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_compactions_skipped_total",
		Help: "Total number of skipped compactions due to disabled auto compaction.",
	})
	m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_lowest_timestamp",
		Help: "Lowest timestamp value stored in the database. The unit is decided by the library consumer.",
	}, func() float64 {
		db.mtx.RLock()
		defer db.mtx.RUnlock()
		if len(db.blocks) == 0 {
			return float64(db.head.MinTime())
		}
		return float64(db.blocks[0].meta.MinTime)
	})
	m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "prometheus_tsdb_tombstone_cleanup_seconds",
		Help:                            "The time taken to recompact blocks to remove tombstones.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: 1 * time.Hour,
	})
	m.blocksBytes = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_storage_blocks_bytes",
		Help: "The number of bytes that are currently used for local storage by all blocks.",
	})
	m.maxBytes = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_retention_limit_bytes",
		Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled",
	})
	m.retentionDuration = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_retention_limit_seconds",
		Help: "How long to retain samples in storage.",
	})
	m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_size_retentions_total",
		Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.",
	})

	if r != nil {
		r.MustRegister(
			m.loadedBlocks,
			m.symbolTableSize,
			m.reloads,
			m.reloadsFailed,
			m.compactionsFailed,
			m.compactionsTriggered,
			m.compactionsSkipped,
			m.sizeRetentionCount,
			m.timeRetentionCount,
			m.startTime,
			m.tombCleanTimer,
			m.blocksBytes,
			m.maxBytes,
			m.retentionDuration,
		)
	}
	return m
}

// DBStats contains statistics about the DB separated by component (e.g. head).
// They are available before the DB has finished initializing.
type DBStats struct {
	Head *HeadStats
}

// NewDBStats returns a new DBStats object initialized using the
// new function from each component.
func NewDBStats() *DBStats {
	return &DBStats{
		Head: NewHeadStats(),
	}
}

// ErrClosed is returned when the db is closed.
var ErrClosed = errors.New("db already closed")

// DBReadOnly provides APIs for read only operations on a database.
// Current implementation doesn't support concurrency so
// all API calls should happen in the same goroutine.
type DBReadOnly struct {
	logger     *slog.Logger
	dir        string
	sandboxDir string
	closers    []io.Closer
	closed     chan struct{}
}

// OpenDBReadOnly opens DB in the given directory for read only operations.
func OpenDBReadOnly(dir, sandboxDirRoot string, l *slog.Logger) (*DBReadOnly, error) {
	if _, err := os.Stat(dir); err != nil {
		return nil, fmt.Errorf("opening the db dir: %w", err)
	}

	if sandboxDirRoot == "" {
		sandboxDirRoot = dir
	}
	sandboxDir, err := os.MkdirTemp(sandboxDirRoot, "tmp_dbro_sandbox")
	if err != nil {
		return nil, fmt.Errorf("setting up sandbox dir: %w", err)
	}

	if l == nil {
		l = promslog.NewNopLogger()
	}

	return &DBReadOnly{
		logger:     l,
		dir:        dir,
		sandboxDir: sandboxDir,
		closed:     make(chan struct{}),
	}, nil
}
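
// A hedged usage sketch, not from the original file: opening an existing TSDB
// directory read-only and listing its persisted blocks. The path is an
// assumption for illustration.
//
//	db, err := tsdb.OpenDBReadOnly("/var/lib/prometheus/data", "", nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()
//	blocks, err := db.Blocks()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, b := range blocks {
//		fmt.Println(b.Meta().ULID)
//	}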

// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL.
// Samples that are in existing blocks will not be written to the new block.
// Note that if the read only database is running concurrently with a
// writable database then writing the WAL to the database directory can race.
func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
	blockReaders, err := db.Blocks()
	if err != nil {
		return fmt.Errorf("read blocks: %w", err)
	}
	maxBlockTime := int64(math.MinInt64)
	if len(blockReaders) > 0 {
		maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime
	}
	w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal"))
	if err != nil {
		return err
	}
	var wbl *wlog.WL
	wblDir := filepath.Join(db.dir, wlog.WblDirName)
	if _, err := os.Stat(wblDir); !os.IsNotExist(err) {
		wbl, err = wlog.Open(db.logger, wblDir)
		if err != nil {
			return err
		}
	}
	opts := DefaultHeadOptions()
	opts.ChunkDirRoot = db.dir
	head, err := NewHead(nil, db.logger, w, wbl, opts, NewHeadStats())
	if err != nil {
		return err
	}
	defer func() {
		errs := tsdb_errors.NewMulti(returnErr)
		if err := head.Close(); err != nil {
			errs.Add(fmt.Errorf("closing Head: %w", err))
		}
		returnErr = errs.Err()
	}()
	// Set the min valid time for the ingested wal samples
	// to be no lower than the maxt of the last block.
	if err := head.Init(maxBlockTime); err != nil {
		return fmt.Errorf("read WAL: %w", err)
	}
	mint := head.MinTime()
	maxt := head.MaxTime()
	rh := NewRangeHead(head, mint, maxt)
	compactor, err := NewLeveledCompactor(
		context.Background(),
		nil,
		db.logger,
		ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
		chunkenc.NewPool(), nil,
	)
	if err != nil {
		return fmt.Errorf("create leveled compactor: %w", err)
	}
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this, the block interval is always one larger than the total samples it includes.
	_, err = compactor.Write(dir, rh, mint, maxt+1, nil)
	if err != nil {
		return fmt.Errorf("writing WAL: %w", err)
	}
	return nil
}
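
// Hedged sketch, not from the original file: flushing the WAL of a read-only
// DB into a block directory, e.g. to turn leftover WAL data into a persisted
// block. The paths are assumptions.
//
//	db, err := tsdb.OpenDBReadOnly("/var/lib/prometheus/data", "", nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()
//	if err := db.FlushWAL("/var/lib/prometheus/data"); err != nil {
//		log.Fatal(err)
//	}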

func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQueryable, error) {
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	blockReaders, err := db.Blocks()
	if err != nil {
		return nil, err
	}
	blocks := make([]*Block, len(blockReaders))
	for i, b := range blockReaders {
		b, ok := b.(*Block)
		if !ok {
			return nil, errors.New("unable to convert a read only block to a normal block")
		}
		blocks[i] = b
	}

	opts := DefaultHeadOptions()
	// Hard link the chunk files to a dir in db.sandboxDir in case the Head needs to truncate some of them
	// or cut new ones while replaying the WAL.
	// See https://github.com/prometheus/prometheus/issues/11618.
	err = chunks.HardLinkChunkFiles(mmappedChunksDir(db.dir), mmappedChunksDir(db.sandboxDir))
	if err != nil {
		return nil, err
	}
	opts.ChunkDirRoot = db.sandboxDir
	head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats())
	if err != nil {
		return nil, err
	}
	maxBlockTime := int64(math.MinInt64)
	if len(blocks) > 0 {
		maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
	}
	// Also add the WAL if the current blocks don't cover the requested time range.
	if maxBlockTime <= maxt {
		if err := head.Close(); err != nil {
			return nil, err
		}
		w, err := wlog.Open(db.logger, filepath.Join(db.dir, "wal"))
		if err != nil {
			return nil, err
		}
		var wbl *wlog.WL
		wblDir := filepath.Join(db.dir, wlog.WblDirName)
		if _, err := os.Stat(wblDir); !os.IsNotExist(err) {
			wbl, err = wlog.Open(db.logger, wblDir)
			if err != nil {
				return nil, err
			}
		}
		opts := DefaultHeadOptions()
		opts.ChunkDirRoot = db.sandboxDir
		head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats())
		if err != nil {
			return nil, err
		}
		// Set the min valid time for the ingested wal samples
		// to be no lower than the maxt of the last block.
		if err := head.Init(maxBlockTime); err != nil {
			return nil, fmt.Errorf("read WAL: %w", err)
		}
		// Set the wal and the wbl to nil to disable related operations.
		// This is mainly to avoid blocking when closing the head.
		head.wal = nil
		head.wbl = nil
	}

	db.closers = append(db.closers, head)
	return &DB{
		dir:                   db.dir,
		logger:                db.logger,
		blocks:                blocks,
		head:                  head,
		blockQuerierFunc:      NewBlockQuerier,
		blockChunkQuerierFunc: NewBlockChunkQuerier,
	}, nil
}

// Querier loads the blocks and wal and returns a new querier over the data partition for the given time range.
// Current implementation doesn't support multiple Queriers.
func (db *DBReadOnly) Querier(mint, maxt int64) (storage.Querier, error) {
	q, err := db.loadDataAsQueryable(maxt)
	if err != nil {
		return nil, err
	}
	return q.Querier(mint, maxt)
}

// ChunkQuerier loads blocks and the wal and returns a new chunk querier over the data partition for the given time range.
// Current implementation doesn't support multiple ChunkQueriers.
func (db *DBReadOnly) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
	q, err := db.loadDataAsQueryable(maxt)
	if err != nil {
		return nil, err
	}
	return q.ChunkQuerier(mint, maxt)
}
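
// Hedged sketch, not from the original file: selecting series through a
// read-only querier. The matcher and the unbounded time range are arbitrary
// assumptions.
//
//	q, err := db.Querier(math.MinInt64, math.MaxInt64)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer q.Close()
//	ss := q.Select(context.Background(), false, nil,
//		labels.MustNewMatcher(labels.MatchEqual, "__name__", "up"))
//	for ss.Next() {
//		fmt.Println(ss.At().Labels())
//	}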

// Blocks returns a slice of block readers for persisted blocks.
func (db *DBReadOnly) Blocks() ([]BlockReader, error) {
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	loadable, corrupted, err := openBlocks(db.logger, db.dir, nil, nil, DefaultPostingsDecoderFactory)
	if err != nil {
		return nil, err
	}

	// Corrupted blocks that have been superseded by a loadable block can be safely ignored.
	for _, block := range loadable {
		for _, b := range block.Meta().Compaction.Parents {
			delete(corrupted, b.ULID)
		}
	}
	if len(corrupted) > 0 {
		for _, b := range loadable {
			if err := b.Close(); err != nil {
				db.logger.Warn("Closing block failed", "err", err, "block", b)
			}
		}
		errs := tsdb_errors.NewMulti()
		for ulid, err := range corrupted {
			if err != nil {
				errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
			}
		}
		return nil, errs.Err()
	}

	if len(loadable) == 0 {
		return nil, nil
	}

	slices.SortFunc(loadable, func(a, b *Block) int {
		switch {
		case a.Meta().MinTime < b.Meta().MinTime:
			return -1
		case a.Meta().MinTime > b.Meta().MinTime:
			return 1
		default:
			return 0
		}
	})

	blockMetas := make([]BlockMeta, 0, len(loadable))
	for _, b := range loadable {
		blockMetas = append(blockMetas, b.Meta())
	}
	if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
		db.logger.Warn("Overlapping blocks found during opening", "detail", overlaps.String())
	}

	// Close all previously open readers and add the new ones to the cache.
	for _, closer := range db.closers {
		closer.Close()
	}
	blockClosers := make([]io.Closer, len(loadable))
	blockReaders := make([]BlockReader, len(loadable))
	for i, b := range loadable {
		blockClosers[i] = b
		blockReaders[i] = b
	}
	db.closers = blockClosers

	return blockReaders, nil
}

// LastBlockID returns the BlockID of the latest block.
func (db *DBReadOnly) LastBlockID() (string, error) {
	entries, err := os.ReadDir(db.dir)
	if err != nil {
		return "", err
	}

	maxT := uint64(0)
	lastBlockID := ""

	for _, e := range entries {
		// Check if dir is a block dir or not.
		dirName := e.Name()
		ulidObj, err := ulid.ParseStrict(dirName)
		if err != nil {
			continue // Not a block dir.
		}
		timestamp := ulidObj.Time()
		if timestamp > maxT {
			maxT = timestamp
			lastBlockID = dirName
		}
	}

	if lastBlockID == "" {
		return "", errors.New("no blocks found")
	}

	return lastBlockID, nil
}

// Block returns a block reader by given block id.
func (db *DBReadOnly) Block(blockID string, postingsDecoderFactory PostingsDecoderFactory) (BlockReader, error) {
	select {
	case <-db.closed:
		return nil, ErrClosed
	default:
	}
	_, err := os.Stat(filepath.Join(db.dir, blockID))
	if os.IsNotExist(err) {
		return nil, fmt.Errorf("invalid block ID %s", blockID)
	}

	block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil, postingsDecoderFactory)
	if err != nil {
		return nil, err
	}
	db.closers = append(db.closers, block)

	return block, nil
}
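
// Hedged sketch, not from the original file: opening the most recent block of
// a read-only DB by its ULID and printing its series count.
//
//	id, err := db.LastBlockID()
//	if err != nil {
//		log.Fatal(err)
//	}
//	br, err := db.Block(id, tsdb.DefaultPostingsDecoderFactory)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(br.Meta().Stats.NumSeries)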

// Close all block readers and delete the sandbox dir.
func (db *DBReadOnly) Close() error {
	defer func() {
		// Delete the temporary sandbox directory that was created when opening the DB.
		if err := os.RemoveAll(db.sandboxDir); err != nil {
			db.logger.Error("delete sandbox dir", "err", err)
		}
	}()
	select {
	case <-db.closed:
		return ErrClosed
	default:
	}
	close(db.closed)
	return tsdb_errors.CloseAll(db.closers)
}

// Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used.
func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) {
	var rngs []int64
	opts, rngs = validateOpts(opts, nil)

	return open(dir, l, r, opts, rngs, stats)
}

func validateOpts(opts *Options, rngs []int64) (*Options, []int64) {
	if opts == nil {
		opts = DefaultOptions()
	}
	if opts.StripeSize <= 0 {
		opts.StripeSize = DefaultStripeSize
	}
	if opts.HeadChunksWriteBufferSize <= 0 {
		opts.HeadChunksWriteBufferSize = chunks.DefaultWriteBufferSize
	}
	if opts.HeadChunksWriteQueueSize < 0 {
		opts.HeadChunksWriteQueueSize = chunks.DefaultWriteQueueSize
	}
	if opts.SamplesPerChunk <= 0 {
		opts.SamplesPerChunk = DefaultSamplesPerChunk
	}
	if opts.MaxBlockChunkSegmentSize <= 0 {
		opts.MaxBlockChunkSegmentSize = chunks.DefaultChunkSegmentSize
	}
	if opts.MinBlockDuration <= 0 {
		opts.MinBlockDuration = DefaultBlockDuration
	}
	if opts.MinBlockDuration > opts.MaxBlockDuration {
		opts.MaxBlockDuration = opts.MinBlockDuration
	}
	if opts.OutOfOrderCapMax <= 0 {
		opts.OutOfOrderCapMax = DefaultOutOfOrderCapMax
	}
	if opts.OutOfOrderTimeWindow < 0 {
		opts.OutOfOrderTimeWindow = 0
	}

	if len(rngs) == 0 {
		// Start with the smallest block duration and create exponential buckets until they exceed the
		// configured maximum block duration.
		rngs = ExponentialBlockRanges(opts.MinBlockDuration, 10, 3)
	}
	return opts, rngs
}
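
// A hedged worked example, not from the original file, of the block ranges
// derived above: with the default 2h MinBlockDuration,
//
//	rngs := ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 10, 3)
//
// is assumed to yield 2h, 6h, 18h, ... in milliseconds (each step multiplied
// by 3); open() then truncates this list at the first range exceeding
// MaxBlockDuration.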

// open returns a new DB in the given directory.
// It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database.
// It is not safe to open more than one DB in the same directory.
func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) {
	if err := os.MkdirAll(dir, 0o777); err != nil {
		return nil, err
	}
	if l == nil {
		l = promslog.NewNopLogger()
	}
	if stats == nil {
		stats = NewDBStats()
	}

	for i, v := range rngs {
		if v > opts.MaxBlockDuration {
			rngs = rngs[:i]
			break
		}
	}

	// Fixup bad format written by Prometheus 2.1.
	if err := repairBadIndexVersion(l, dir); err != nil {
		return nil, fmt.Errorf("repair bad index version: %w", err)
	}

	walDir := filepath.Join(dir, "wal")
	wblDir := filepath.Join(dir, wlog.WblDirName)

	for _, tmpDir := range []string{walDir, dir} {
		// Remove tmp dirs.
		if err := removeBestEffortTmpDirs(l, tmpDir); err != nil {
			return nil, fmt.Errorf("remove tmp dirs: %w", err)
		}
	}

	db := &DB{
		dir:            dir,
		logger:         l,
		opts:           opts,
		compactc:       make(chan struct{}, 1),
		donec:          make(chan struct{}),
		stopc:          make(chan struct{}),
		autoCompact:    true,
		chunkPool:      chunkenc.NewPool(),
		blocksToDelete: opts.BlocksToDelete,
		registerer:     r,
	}
	defer func() {
		// Close files if startup fails somewhere.
		if returnedErr == nil {
			return
		}

		close(db.donec) // DB is never run if it was an error, so close this channel here.

		errs := tsdb_errors.NewMulti(returnedErr)
		if err := db.Close(); err != nil {
			errs.Add(fmt.Errorf("close DB after failed startup: %w", err))
		}
		returnedErr = errs.Err()
	}()

	if db.blocksToDelete == nil {
		db.blocksToDelete = DefaultBlocksToDelete(db)
	}

	var err error
	db.locker, err = tsdbutil.NewDirLocker(dir, "tsdb", db.logger, r)
	if err != nil {
		return nil, err
	}
	if !opts.NoLockfile {
		if err := db.locker.Lock(); err != nil {
			return nil, err
		}
	}

	ctx, cancel := context.WithCancel(context.Background())
	if opts.NewCompactorFunc != nil {
		db.compactor, err = opts.NewCompactorFunc(ctx, r, l, rngs, db.chunkPool, opts)
	} else {
		db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{
			MaxBlockChunkSegmentSize:    opts.MaxBlockChunkSegmentSize,
			EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
			PD:                          opts.PostingsDecoderFactory,
		})
	}
	if err != nil {
		cancel()
		return nil, fmt.Errorf("create compactor: %w", err)
	}
	db.compactCancel = cancel

	if opts.BlockQuerierFunc == nil {
		db.blockQuerierFunc = NewBlockQuerier
	} else {
		db.blockQuerierFunc = opts.BlockQuerierFunc
	}
	if opts.BlockChunkQuerierFunc == nil {
		db.blockChunkQuerierFunc = NewBlockChunkQuerier
	} else {
		db.blockChunkQuerierFunc = opts.BlockChunkQuerierFunc
	}

	var wal, wbl *wlog.WL
	segmentSize := wlog.DefaultSegmentSize
	// WAL is enabled.
	if opts.WALSegmentSize >= 0 {
		// WAL is set to a custom size.
		if opts.WALSegmentSize > 0 {
			segmentSize = opts.WALSegmentSize
		}
		wal, err = wlog.NewSize(l, r, walDir, segmentSize, opts.WALCompression)
		if err != nil {
			return nil, err
		}
		// Check if there is a WBL on disk, in which case we should replay that data.
		wblSize, err := fileutil.DirSize(wblDir)
		if err != nil && !os.IsNotExist(err) {
			return nil, err
		}
		if opts.OutOfOrderTimeWindow > 0 || wblSize > 0 {
			wbl, err = wlog.NewSize(l, r, wblDir, segmentSize, opts.WALCompression)
			if err != nil {
				return nil, err
			}
		}
	}
	db.oooWasEnabled.Store(opts.OutOfOrderTimeWindow > 0)
	headOpts := DefaultHeadOptions()
	headOpts.ChunkRange = rngs[0]
	headOpts.ChunkDirRoot = dir
	headOpts.ChunkPool = db.chunkPool
	headOpts.ChunkWriteBufferSize = opts.HeadChunksWriteBufferSize
	headOpts.ChunkWriteQueueSize = opts.HeadChunksWriteQueueSize
	headOpts.SamplesPerChunk = opts.SamplesPerChunk
	headOpts.StripeSize = opts.StripeSize
	headOpts.SeriesCallback = opts.SeriesLifecycleCallback
	headOpts.EnableExemplarStorage = opts.EnableExemplarStorage
	headOpts.MaxExemplars.Store(opts.MaxExemplars)
	headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
	headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
	headOpts.EnableOOONativeHistograms.Store(opts.EnableOOONativeHistograms)
	headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
	headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
	headOpts.EnableSharding = opts.EnableSharding
	if opts.WALReplayConcurrency > 0 {
		headOpts.WALReplayConcurrency = opts.WALReplayConcurrency
	}
	if opts.IsolationDisabled {
		// We only override this flag if isolation is disabled at DB level. We use the default otherwise.
		headOpts.IsolationDisabled = opts.IsolationDisabled
	}
	db.head, err = NewHead(r, l, wal, wbl, headOpts, stats.Head)
	if err != nil {
		return nil, err
	}
	db.head.writeNotified = db.writeNotified

	// Register metrics after assigning the head block.
	db.metrics = newDBMetrics(db, r)
	maxBytes := opts.MaxBytes
	if maxBytes < 0 {
		maxBytes = 0
	}
	db.metrics.maxBytes.Set(float64(maxBytes))
	db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds())

	if err := db.reload(); err != nil {
		return nil, err
	}
	// Set the min valid time for the ingested samples
	// to be no lower than the maxt of the last block.
	minValidTime := int64(math.MinInt64)
	// We do not consider blocks created from out-of-order samples for Head's minValidTime
	// since minValidTime is only for the in-order data and we do not want to discard unnecessary
	// samples from the Head.
	inOrderMaxTime, ok := db.inOrderBlocksMaxTime()
	if ok {
		minValidTime = inOrderMaxTime
	}

	if initErr := db.head.Init(minValidTime); initErr != nil {
		db.head.metrics.walCorruptionsTotal.Inc()
		var e *errLoadWbl
		if errors.As(initErr, &e) {
			db.logger.Warn("Encountered WBL read error, attempting repair", "err", initErr)
			if err := wbl.Repair(e.err); err != nil {
				return nil, fmt.Errorf("repair corrupted WBL: %w", err)
			}
			db.logger.Info("Successfully repaired WBL")
		} else {
			db.logger.Warn("Encountered WAL read error, attempting repair", "err", initErr)
			if err := wal.Repair(initErr); err != nil {
				return nil, fmt.Errorf("repair corrupted WAL: %w", err)
			}
			db.logger.Info("Successfully repaired WAL")
		}
	}

	if db.head.MinOOOTime() != int64(math.MaxInt64) {
		// Some OOO data was replayed from the disk that needs compaction and cleanup.
		db.oooWasEnabled.Store(true)
	}

	if opts.EnableDelayedCompaction {
		opts.CompactionDelay = db.generateCompactionDelay()
	}

	go db.run(ctx)

	return db, nil
}

func removeBestEffortTmpDirs(l *slog.Logger, dir string) error {
	files, err := os.ReadDir(dir)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	for _, f := range files {
		if isTmpDir(f) {
			if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil {
				l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err)
				continue
			}
			l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name()))
		}
	}
	return nil
}

// StartTime implements the Storage interface.
func (db *DB) StartTime() (int64, error) {
	db.mtx.RLock()
	defer db.mtx.RUnlock()

	if len(db.blocks) > 0 {
		return db.blocks[0].Meta().MinTime, nil
	}
	return db.head.MinTime(), nil
}

// Dir returns the directory of the database.
func (db *DB) Dir() string {
	return db.dir
}

func (db *DB) run(ctx context.Context) {
	defer close(db.donec)

	backoff := time.Duration(0)

	for {
		select {
		case <-db.stopc:
			return
		case <-time.After(backoff):
		}

		select {
		case <-time.After(1 * time.Minute):
			db.cmtx.Lock()
			if err := db.reloadBlocks(); err != nil {
				db.logger.Error("reloadBlocks", "err", err)
			}
			db.cmtx.Unlock()

			select {
			case db.compactc <- struct{}{}:
			default:
			}
			// We attempt mmapping of head chunks regularly.
			db.head.mmapHeadChunks()
		case <-db.compactc:
			db.metrics.compactionsTriggered.Inc()

			db.autoCompactMtx.Lock()
			if db.autoCompact {
				if err := db.Compact(ctx); err != nil {
					db.logger.Error("compaction failed", "err", err)
					backoff = exponential(backoff, 1*time.Second, 1*time.Minute)
				} else {
					backoff = 0
				}
			} else {
				db.metrics.compactionsSkipped.Inc()
			}
			db.autoCompactMtx.Unlock()
		case <-db.stopc:
			return
		}
	}
}

// Appender opens a new appender against the database.
func (db *DB) Appender(ctx context.Context) storage.Appender {
	return dbAppender{db: db, Appender: db.head.Appender(ctx)}
}
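
// Hedged sketch, not from the original file: appending one float sample
// through the public Appender API and committing it. The series labels are an
// assumption.
//
//	app := db.Appender(context.Background())
//	_, err := app.Append(0, labels.FromStrings("__name__", "up", "job", "demo"), time.Now().UnixMilli(), 1)
//	if err != nil {
//		app.Rollback()
//		log.Fatal(err)
//	}
//	if err := app.Commit(); err != nil {
//		log.Fatal(err)
//	}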

// ApplyConfig applies a new config to the DB.
// Behaviour of 'OutOfOrderTimeWindow' is as follows:
// OOO enabled = oooTimeWindow > 0. OOO disabled = oooTimeWindow is 0.
// 1) Before: OOO disabled, Now: OOO enabled =>
//   - A new WBL is created for the head block.
//   - OOO compaction is enabled.
//   - Overlapping queries are enabled.
//
// 2) Before: OOO enabled, Now: OOO enabled =>
//   - Only the time window is updated.
//
// 3) Before: OOO enabled, Now: OOO disabled =>
//   - Time Window set to 0. So no new OOO samples will be allowed.
//   - OOO WBL will stay and will be eventually cleaned up.
//   - OOO Compaction and overlapping queries will remain enabled until a restart or until all OOO samples are compacted.
//
// 4) Before: OOO disabled, Now: OOO disabled => no-op.
func (db *DB) ApplyConfig(conf *config.Config) error {
	oooTimeWindow := int64(0)
	if conf.StorageConfig.TSDBConfig != nil {
		oooTimeWindow = conf.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
	}
	if oooTimeWindow < 0 {
		oooTimeWindow = 0
	}

	// Create WBL if it was not present and if OOO is enabled with WAL enabled.
	var wblog *wlog.WL
	var err error
	switch {
	case db.head.wbl != nil:
		// The existing WBL from the disk might have been replayed while OOO was disabled.
		wblog = db.head.wbl
	case !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0:
		segmentSize := wlog.DefaultSegmentSize
		// WAL is set to a custom size.
		if db.opts.WALSegmentSize > 0 {
			segmentSize = db.opts.WALSegmentSize
		}
		oooWalDir := filepath.Join(db.dir, wlog.WblDirName)
		wblog, err = wlog.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression)
		if err != nil {
			return err
		}
	}

	db.opts.OutOfOrderTimeWindow = oooTimeWindow
	db.head.ApplyConfig(conf, wblog)

	if !db.oooWasEnabled.Load() {
		db.oooWasEnabled.Store(oooTimeWindow > 0)
	}
	return nil
}

// EnableNativeHistograms enables the native histogram feature.
func (db *DB) EnableNativeHistograms() {
	db.head.EnableNativeHistograms()
}

// DisableNativeHistograms disables the native histogram feature.
func (db *DB) DisableNativeHistograms() {
	db.head.DisableNativeHistograms()
}

// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms.
func (db *DB) EnableOOONativeHistograms() {
	db.head.EnableOOONativeHistograms()
}

// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms.
func (db *DB) DisableOOONativeHistograms() {
	db.head.DisableOOONativeHistograms()
}

// dbAppender wraps the DB's head appender and triggers compactions on commit
// if necessary.
type dbAppender struct {
	storage.Appender
	db *DB
}

var _ storage.GetRef = dbAppender{}

func (a dbAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) {
	if g, ok := a.Appender.(storage.GetRef); ok {
		return g.GetRef(lset, hash)
	}
	return 0, labels.EmptyLabels()
}

func (a dbAppender) Commit() error {
	err := a.Appender.Commit()

	// In practice, running this check only every few minutes would suffice.
	// But for benchmarks and high-frequency use cases, checking on every
	// commit is the safer way.
	if a.db.head.compactable() {
		select {
		case a.db.compactc <- struct{}{}:
		default:
		}
	}
	return err
}

// waitingForCompactionDelay returns true if the DB is waiting for the Head compaction delay.
// This doesn't guarantee that the Head is really compactable.
func (db *DB) waitingForCompactionDelay() bool {
	return time.Since(db.timeWhenCompactionDelayStarted) < db.opts.CompactionDelay
}

// Compact data if possible. After successful compaction blocks are reloaded
// which will also delete the blocks that fall out of the retention window.
// Old blocks are only deleted on reloadBlocks based on the new block's parent information.
// See DB.reloadBlocks documentation for further information.
func (db *DB) Compact(ctx context.Context) (returnErr error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()
	defer func() {
		if returnErr != nil && !errors.Is(returnErr, context.Canceled) {
			// If we got an error because the context was canceled, then we're most
			// likely shutting down TSDB and don't need to report this in metrics.
			db.metrics.compactionsFailed.Inc()
		}
	}()

	lastBlockMaxt := int64(math.MinInt64)
	defer func() {
		errs := tsdb_errors.NewMulti(returnErr)
		if err := db.head.truncateWAL(lastBlockMaxt); err != nil {
			errs.Add(fmt.Errorf("WAL truncation in Compact defer: %w", err))
		}
		returnErr = errs.Err()
	}()

	start := time.Now()
	// Check whether we have pending head blocks that are ready to be persisted.
	// They have the highest priority.
	for {
		select {
		case <-db.stopc:
			return nil
		default:
		}

		if !db.head.compactable() {
			// Reset the counter once the head compactions are done.
			// This would also reset it if a manual compaction was triggered while the auto compaction was in its delay period.
			if !db.timeWhenCompactionDelayStarted.IsZero() {
				db.timeWhenCompactionDelayStarted = time.Time{}
			}
			break
		}

		if db.timeWhenCompactionDelayStarted.IsZero() {
			// Start counting for the delay.
			db.timeWhenCompactionDelayStarted = time.Now()
		}
		if db.waitingForCompactionDelay() {
			break
		}

		mint := db.head.MinTime()
		maxt := rangeForTimestamp(mint, db.head.chunkRange.Load())

		// Wrap head into a range that bounds all reads to it.
		// We remove 1 millisecond from maxt because block
		// intervals are half-open: [b.MinTime, b.MaxTime). But
		// chunk intervals are closed: [c.MinTime, c.MaxTime];
		// so in order to make sure that overlaps are evaluated
		// consistently, we explicitly remove the last value
		// from the block interval here.
		rh := NewRangeHeadWithIsolationDisabled(db.head, mint, maxt-1)

		// Compaction runs with isolation disabled, because head.compactable()
		// ensures that maxt is more than chunkRange/2 back from now, and
		// head.appendableMinValidTime() ensures that no new appends can start within the compaction range.
		// We do need to wait for any overlapping appenders that started previously to finish.
		db.head.WaitForAppendersOverlapping(rh.MaxTime())

		if err := db.compactHead(rh); err != nil {
			return fmt.Errorf("compact head: %w", err)
		}
		// Consider only successful compactions for WAL truncation.
		lastBlockMaxt = maxt
	}

	// Clear some disk space before compacting blocks, especially important
	// when Head compaction happened over a long time range.
	if err := db.head.truncateWAL(lastBlockMaxt); err != nil {
		return fmt.Errorf("WAL truncation in Compact: %w", err)
	}

	compactionDuration := time.Since(start)
	if compactionDuration.Milliseconds() > db.head.chunkRange.Load() {
		db.logger.Warn(
			"Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up",
			"duration", compactionDuration.String(),
			"block_range", db.head.chunkRange.Load(),
		)
	}

	if lastBlockMaxt != math.MinInt64 {
		// The head was compacted, so we compact OOO head as well.
		if err := db.compactOOOHead(ctx); err != nil {
			return fmt.Errorf("compact ooo head: %w", err)
		}
	}

	return db.compactBlocks()
}

// CompactHead compacts the given RangeHead.
func (db *DB) CompactHead(head *RangeHead) error {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	if err := db.compactHead(head); err != nil {
		return fmt.Errorf("compact head: %w", err)
	}

	if err := db.head.truncateWAL(head.BlockMaxTime()); err != nil {
		return fmt.Errorf("WAL truncation: %w", err)
	}
	return nil
}

// CompactOOOHead compacts the OOO Head.
func (db *DB) CompactOOOHead(ctx context.Context) error {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	return db.compactOOOHead(ctx)
}

// Callback for testing.
var compactOOOHeadTestingCallback func()

func (db *DB) compactOOOHead(ctx context.Context) error {
	if !db.oooWasEnabled.Load() {
		return nil
	}
	oooHead, err := NewOOOCompactionHead(ctx, db.head)
	if err != nil {
		return fmt.Errorf("get ooo compaction head: %w", err)
	}

	if compactOOOHeadTestingCallback != nil {
		compactOOOHeadTestingCallback()
		compactOOOHeadTestingCallback = nil
	}

	ulids, err := db.compactOOO(db.dir, oooHead)
	if err != nil {
		return fmt.Errorf("compact ooo head: %w", err)
	}
	if err := db.reloadBlocks(); err != nil {
		errs := tsdb_errors.NewMulti(err)
		for _, uid := range ulids {
			if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
				errs.Add(errRemoveAll)
			}
		}
		return fmt.Errorf("reloadBlocks blocks after failed compact ooo head: %w", errs.Err())
	}

	lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef()
	if lastWBLFile != 0 || minOOOMmapRef != 0 {
		if minOOOMmapRef != 0 {
			// Ensure that no more queriers are created that will reference chunks we're about to garbage collect.
			// truncateOOO waits for any existing queriers that reference chunks we're about to garbage collect to
			// complete before running garbage collection, so we don't need to do that here.
			//
			// We take mtx to ensure that Querier() and ChunkQuerier() don't miss blocks: without this, they could
			// capture the list of blocks before the call to reloadBlocks() above runs, but then capture
			// lastGarbageCollectedMmapRef after we update it here, and therefore not query either the blocks we've just
			// written or the head chunks those blocks were created from.
			db.mtx.Lock()
			db.lastGarbageCollectedMmapRef = minOOOMmapRef
			db.mtx.Unlock()
		}

		if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil {
			return fmt.Errorf("truncate ooo wbl: %w", err)
		}
	}

	return nil
}

// compactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given.
// Each ULID in the result corresponds to a block in a unique time range.
func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID, err error) {
	start := time.Now()

	blockSize := oooHead.ChunkRange()
	oooHeadMint, oooHeadMaxt := oooHead.MinTime(), oooHead.MaxTime()
	ulids := make([]ulid.ULID, 0)
	defer func() {
		if err != nil {
			// Best effort removal of created block on any error.
			for _, uid := range ulids {
				_ = os.RemoveAll(filepath.Join(db.dir, uid.String()))
			}
		}
	}()

	meta := &BlockMeta{}
	meta.Compaction.SetOutOfOrder()
	for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
		mint, maxt := t, t+blockSize
		// Block intervals are half-open: [b.MinTime, b.MaxTime). A block's
		// interval is therefore always 1ms longer than the range of samples it includes.
		uids, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta)
		if err != nil {
			return nil, err
		}
		ulids = append(ulids, uids...)
	}

	if len(ulids) == 0 {
		db.logger.Info(
			"compact ooo head resulted in no blocks",
			"duration", time.Since(start),
		)
		return nil, nil
	}

	db.logger.Info(
		"out-of-order compaction completed",
		"duration", time.Since(start),
		"ulids", fmt.Sprintf("%v", ulids),
	)
	return ulids, nil
}
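
// To illustrate the alignment in compactOOO above (made-up numbers): with
// blockSize = 7200000 (2h in ms), oooHeadMint = 5000000 and
// oooHeadMaxt = 16000000, the loop starts at t = 7200000*(5000000/7200000) = 0
// and attempts candidate blocks for [0, 7200000), [7200000, 14400000) and
// [14400000, 21600000); a range that contains no OOO samples simply produces
// no block.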

// compactHead compacts the given RangeHead.
// The compaction mutex should be held before calling this method.
func (db *DB) compactHead(head *RangeHead) error {
	uids, err := db.compactor.Write(db.dir, head, head.MinTime(), head.BlockMaxTime(), nil)
	if err != nil {
		return fmt.Errorf("persist head block: %w", err)
	}

	if err := db.reloadBlocks(); err != nil {
		multiErr := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
		for _, uid := range uids {
			if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
				multiErr.Add(fmt.Errorf("delete persisted head block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
			}
		}
		return multiErr.Err()
	}
	if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
		return fmt.Errorf("head memory truncate: %w", err)
	}

	db.head.RebuildSymbolTable(db.logger)

	return nil
}

// compactBlocks compacts all the eligible on-disk blocks.
// The compaction mutex should be held before calling this method.
func (db *DB) compactBlocks() (err error) {
	// Check for compactions of multiple blocks.
	for {
		// If we have a lot of blocks to compact the whole process might take
		// long enough that we end up with a HEAD block that needs to be written.
		// Check if that's the case and stop compactions early.
		if db.head.compactable() && !db.waitingForCompactionDelay() {
			db.logger.Warn("aborting block compactions to persist the head block")
			return nil
		}

		plan, err := db.compactor.Plan(db.dir)
		if err != nil {
			return fmt.Errorf("plan compaction: %w", err)
		}
		if len(plan) == 0 {
			break
		}

		select {
		case <-db.stopc:
			return nil
		default:
		}

		uids, err := db.compactor.Compact(db.dir, plan, db.blocks)
		if err != nil {
			return fmt.Errorf("compact %s: %w", plan, err)
		}

		if err := db.reloadBlocks(); err != nil {
			errs := tsdb_errors.NewMulti(fmt.Errorf("reloadBlocks blocks: %w", err))
			for _, uid := range uids {
				if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil {
					errs.Add(fmt.Errorf("delete persisted block after failed db reloadBlocks:%s: %w", uid, errRemoveAll))
				}
			}
			return errs.Err()
		}
	}

	return nil
}

// getBlock iterates a given block range to find a block by a given id.
// If found it returns the block itself and a boolean to indicate that it was found.
func getBlock(allBlocks []*Block, id ulid.ULID) (*Block, bool) {
	for _, b := range allBlocks {
		if b.Meta().ULID == id {
			return b, true
		}
	}
	return nil, false
}

// reload reloads blocks and truncates the head and its WAL.
func (db *DB) reload() error {
	if err := db.reloadBlocks(); err != nil {
		return fmt.Errorf("reloadBlocks: %w", err)
	}
	maxt, ok := db.inOrderBlocksMaxTime()
	if !ok {
		return nil
	}
	if err := db.head.Truncate(maxt); err != nil {
		return fmt.Errorf("head truncate: %w", err)
	}
	return nil
}

// reloadBlocks reloads blocks without touching head.
// Blocks that are obsolete due to replacement or retention will be deleted.
func (db *DB) reloadBlocks() (err error) {
	defer func() {
		if err != nil {
			db.metrics.reloadsFailed.Inc()
		}
		db.metrics.reloads.Inc()
	}()

	// Now that we reload TSDB every minute, there is a high chance for a race condition with a reload
	// triggered by CleanTombstones(). We need to lock the reload to avoid the situation where
	// a normal reload and CleanTombstones try to delete the same block.
	db.mtx.Lock()
	defer db.mtx.Unlock()

	loadable, corrupted, err := openBlocks(db.logger, db.dir, db.blocks, db.chunkPool, db.opts.PostingsDecoderFactory)
	if err != nil {
		return err
	}

	deletableULIDs := db.blocksToDelete(loadable)
	deletable := make(map[ulid.ULID]*Block, len(deletableULIDs))

	// Mark all parents of loaded blocks as deletable (no matter if they exist). This makes it resilient against the process
	// crashing towards the end of a compaction but before deletions. By doing that, we can pick up the deletion where it left off during a crash.
	for _, block := range loadable {
		if _, ok := deletableULIDs[block.meta.ULID]; ok {
			deletable[block.meta.ULID] = block
		}
		for _, b := range block.Meta().Compaction.Parents {
			if _, ok := corrupted[b.ULID]; ok {
				delete(corrupted, b.ULID)
				db.logger.Warn("Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID)
			}
			deletable[b.ULID] = nil
		}
	}

	if len(corrupted) > 0 {
		// Corrupted but no child loaded for it.
		// Close all new blocks to release the lock for windows.
		for _, block := range loadable {
			if _, open := getBlock(db.blocks, block.Meta().ULID); !open {
				block.Close()
			}
		}
		errs := tsdb_errors.NewMulti()
		for ulid, err := range corrupted {
			if err != nil {
				errs.Add(fmt.Errorf("corrupted block %s: %w", ulid.String(), err))
			}
		}
		return errs.Err()
	}

	var (
		toLoad     []*Block
		blocksSize int64
	)
	// All deletable blocks should be unloaded.
	// NOTE: We need to loop through loadable one more time as there might be loadable blocks ready to be removed (replaced by a compacted block).
	for _, block := range loadable {
		if _, ok := deletable[block.Meta().ULID]; ok {
			deletable[block.Meta().ULID] = block
			continue
		}

		toLoad = append(toLoad, block)
		blocksSize += block.Size()
	}
	db.metrics.blocksBytes.Set(float64(blocksSize))

	slices.SortFunc(toLoad, func(a, b *Block) int {
		switch {
		case a.Meta().MinTime < b.Meta().MinTime:
			return -1
		case a.Meta().MinTime > b.Meta().MinTime:
			return 1
		default:
			return 0
		}
	})

	// Swap new blocks first for subsequently created readers to be seen.
	oldBlocks := db.blocks
	db.blocks = toLoad

	// Only check overlapping blocks when overlapping compaction is enabled.
	if db.opts.EnableOverlappingCompaction {
		blockMetas := make([]BlockMeta, 0, len(toLoad))
		for _, b := range toLoad {
			blockMetas = append(blockMetas, b.Meta())
		}
		if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 {
			db.logger.Warn("Overlapping blocks found during reloadBlocks", "detail", overlaps.String())
		}
	}

	// Append blocks to old, deletable blocks, so we can close them.
	for _, b := range oldBlocks {
		if _, ok := deletable[b.Meta().ULID]; ok {
			deletable[b.Meta().ULID] = b
		}
	}

	if err := db.deleteBlocks(deletable); err != nil {
		return fmt.Errorf("delete %v blocks: %w", len(deletable), err)
	}
	return nil
}

func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, postingsDecoderFactory PostingsDecoderFactory) (blocks []*Block, corrupted map[ulid.ULID]error, err error) {
	bDirs, err := blockDirs(dir)
	if err != nil {
		return nil, nil, fmt.Errorf("find blocks: %w", err)
	}

	corrupted = make(map[ulid.ULID]error)
	for _, bDir := range bDirs {
		meta, _, err := readMetaFile(bDir)
		if err != nil {
			l.Error("Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err)
			continue
		}

		// See if we already have the block in memory or open it otherwise.
		block, open := getBlock(loaded, meta.ULID)
		if !open {
			block, err = OpenBlock(l, bDir, chunkPool, postingsDecoderFactory)
			if err != nil {
				corrupted[meta.ULID] = err
				continue
			}
		}
		blocks = append(blocks, block)
	}
	return blocks, corrupted, nil
}

// DefaultBlocksToDelete returns a filter which decides time based and size based
// retention from the options of the db.
func DefaultBlocksToDelete(db *DB) BlocksToDeleteFunc {
	return func(blocks []*Block) map[ulid.ULID]struct{} {
		return deletableBlocks(db, blocks)
	}
}
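
// A custom filter can be plugged in through Options.BlocksToDelete when
// opening the DB. Illustrative sketch only; the predicate shown (delete only
// blocks already marked deletable) is an assumption, not a recommendation:
//
//	opts := DefaultOptions()
//	opts.BlocksToDelete = func(blocks []*Block) map[ulid.ULID]struct{} {
//		deletable := map[ulid.ULID]struct{}{}
//		for _, b := range blocks {
//			if b.Meta().Compaction.Deletable {
//				deletable[b.Meta().ULID] = struct{}{}
//			}
//		}
//		return deletable
//	}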

// deletableBlocks returns all currently loaded blocks past retention policy or already compacted into a new block.
func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} {
	deletable := make(map[ulid.ULID]struct{})

	// Sort the blocks by time - newest to oldest (largest to smallest timestamp).
	// This ensures that the retentions will remove the oldest blocks.
	slices.SortFunc(blocks, func(a, b *Block) int {
		switch {
		case b.Meta().MaxTime < a.Meta().MaxTime:
			return -1
		case b.Meta().MaxTime > a.Meta().MaxTime:
			return 1
		default:
			return 0
		}
	})

	for _, block := range blocks {
		if block.Meta().Compaction.Deletable {
			deletable[block.Meta().ULID] = struct{}{}
		}
	}

	for ulid := range BeyondTimeRetention(db, blocks) {
		deletable[ulid] = struct{}{}
	}

	for ulid := range BeyondSizeRetention(db, blocks) {
		deletable[ulid] = struct{}{}
	}

	return deletable
}

// BeyondTimeRetention returns those blocks which are beyond the time retention
// set in the db options.
func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) {
	// Time retention is disabled or no blocks to work with.
	if len(blocks) == 0 || db.opts.RetentionDuration == 0 {
		return
	}

	deletable = make(map[ulid.ULID]struct{})
	for i, block := range blocks {
		// The difference between the first block and this block is greater than or
		// equal to the retention period, so this block and all blocks after it are
		// added as deletable.
		if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime >= db.opts.RetentionDuration {
			for _, b := range blocks[i:] {
				deletable[b.meta.ULID] = struct{}{}
			}
			db.metrics.timeRetentionCount.Inc()
			break
		}
	}
	return deletable
}
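
// Worked example for BeyondTimeRetention (made-up values): with
// RetentionDuration = 86400000 (24h in milliseconds) and blocks sorted newest
// to oldest with MaxTime values 100000000, 60000000 and 10000000, the second
// block's distance is 40000000 (kept), while the third block's distance is
// 90000000 >= 86400000, so it and every following block become deletable.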

// BeyondSizeRetention returns those blocks which are beyond the size retention
// set in the db options.
func BeyondSizeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struct{}) {
	// Size retention is disabled or no blocks to work with.
	if len(blocks) == 0 || db.opts.MaxBytes <= 0 {
		return
	}

	deletable = make(map[ulid.ULID]struct{})

	// Initializing size counter with WAL size and Head chunks
	// written to disk, as that is part of the retention strategy.
	blocksSize := db.Head().Size()
	for i, block := range blocks {
		blocksSize += block.Size()
		if blocksSize > db.opts.MaxBytes {
			// Add this and all following blocks for deletion.
			for _, b := range blocks[i:] {
				deletable[b.meta.ULID] = struct{}{}
			}
			db.metrics.sizeRetentionCount.Inc()
			break
		}
	}
	return deletable
}
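
// Worked example for BeyondSizeRetention (made-up values): with
// MaxBytes = 1073741824 (1 GiB), a Head occupying 200 MiB on disk and three
// blocks of 400 MiB each (newest first), the running sum is 600 MiB, then
// 1000 MiB (both within the limit), and first exceeds the limit at the third
// block, so that block and all older ones become deletable.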

// deleteBlocks closes the blocks if loaded and deletes them from disk if they exist.
// When the map contains a non-nil block object, the block is loaded in memory and
// needs to be closed first, as it might have to wait for pending readers to complete.
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error {
	for ulid, block := range blocks {
		if block != nil {
			if err := block.Close(); err != nil {
				db.logger.Warn("Closing block failed", "err", err, "block", ulid)
			}
		}

		toDelete := filepath.Join(db.dir, ulid.String())
		switch _, err := os.Stat(toDelete); {
		case os.IsNotExist(err):
			// Noop.
			continue
		case err != nil:
			return fmt.Errorf("stat dir %v: %w", toDelete, err)
		}

		// Replace atomically to avoid a partial block if the process crashes during deletion.
		tmpToDelete := filepath.Join(db.dir, fmt.Sprintf("%s%s", ulid, tmpForDeletionBlockDirSuffix))
		if err := fileutil.Replace(toDelete, tmpToDelete); err != nil {
			return fmt.Errorf("replace of obsolete block for deletion %s: %w", ulid, err)
		}
		if err := os.RemoveAll(tmpToDelete); err != nil {
			return fmt.Errorf("delete obsolete block %s: %w", ulid, err)
		}
		db.logger.Info("Deleting obsolete block", "block", ulid)
	}

	return nil
}

// TimeRange specifies minTime and maxTime range.
type TimeRange struct {
	Min, Max int64
}

// Overlaps contains overlapping blocks aggregated by overlapping range.
type Overlaps map[TimeRange][]BlockMeta

// String returns a human-readable string representation of the overlapping blocks.
func (o Overlaps) String() string {
	var res []string
	for r, overlaps := range o {
		var groups []string
		for _, m := range overlaps {
			groups = append(groups, fmt.Sprintf(
				"<ulid: %s, mint: %d, maxt: %d, range: %s>",
				m.ULID.String(),
				m.MinTime,
				m.MaxTime,
				(time.Duration((m.MaxTime-m.MinTime)/1000)*time.Second).String(),
			))
		}
		res = append(res, fmt.Sprintf(
			"[mint: %d, maxt: %d, range: %s, blocks: %d]: %s",
			r.Min, r.Max,
			(time.Duration((r.Max-r.Min)/1000)*time.Second).String(),
			len(overlaps),
			strings.Join(groups, ", ")),
		)
	}
	return strings.Join(res, "\n")
}

// OverlappingBlocks returns all overlapping blocks from given meta files.
func OverlappingBlocks(bm []BlockMeta) Overlaps {
	if len(bm) <= 1 {
		return nil
	}
	var (
		overlaps [][]BlockMeta

		// pending contains blocks that have not yet ended with regard to the "current" timestamp.
		pending = []BlockMeta{bm[0]}
		// continuousPending helps to aggregate same overlaps to single group.
		continuousPending = true
	)

	// We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp.
	// We check if any of the pending blocks finished (blocks that we have seen before, but whose maxTime was still ahead of the current
	// timestamp). If not, it means they overlap with our current block. At the same time, the current block becomes pending.
	for _, b := range bm[1:] {
		var newPending []BlockMeta

		for _, p := range pending {
			// "b.MinTime" is our current time.
			if b.MinTime >= p.MaxTime {
				continuousPending = false
				continue
			}
			// "p" overlaps with "b" and "p" is still pending.
			newPending = append(newPending, p)
		}

		// Our block "b" is now pending.
		pending = append(newPending, b)
		if len(newPending) == 0 {
			// No overlaps.
			continue
		}

		if continuousPending && len(overlaps) > 0 {
			overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b)
			continue
		}
		overlaps = append(overlaps, append(newPending, b))
		// Start a new pending group.
		continuousPending = true
	}

	// Compute the critical overlapped time range for each overlap group.
	overlapGroups := Overlaps{}
	for _, overlap := range overlaps {
		minRange := TimeRange{Min: 0, Max: math.MaxInt64}
		for _, b := range overlap {
			if minRange.Max > b.MaxTime {
				minRange.Max = b.MaxTime
			}
			if minRange.Min < b.MinTime {
				minRange.Min = b.MinTime
			}
		}
		overlapGroups[minRange] = overlap
	}
	return overlapGroups
}
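
// Illustrative sketch of OverlappingBlocks (made-up metas; the input is
// assumed to be sorted by MinTime, as reloadBlocks guarantees):
//
//	overlaps := OverlappingBlocks([]BlockMeta{
//		{MinTime: 0, MaxTime: 100},
//		{MinTime: 50, MaxTime: 150},  // Overlaps with the first block.
//		{MinTime: 200, MaxTime: 300}, // Disjoint from both.
//	})
//	// overlaps now holds a single group keyed by TimeRange{Min: 50, Max: 100}.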

func (db *DB) String() string {
	return "HEAD"
}

// Blocks returns the database's persisted blocks.
func (db *DB) Blocks() []*Block {
	db.mtx.RLock()
	defer db.mtx.RUnlock()
	return db.blocks
}

// inOrderBlocksMaxTime returns the max time among the blocks that were not totally created
// out of out-of-order data. If the returned boolean is true, it means there is at least
// one such block.
func (db *DB) inOrderBlocksMaxTime() (maxt int64, ok bool) {
	maxt, ok = int64(math.MinInt64), false
	// If blocks are overlapping, last block might not have the max time. So check all blocks.
	for _, b := range db.Blocks() {
		if !b.meta.Compaction.FromOutOfOrder() && b.meta.MaxTime > maxt {
			ok = true
			maxt = b.meta.MaxTime
		}
	}
	return maxt, ok
}

// Head returns the database's head.
func (db *DB) Head() *Head {
	return db.head
}

// Close the partition.
func (db *DB) Close() error {
	close(db.stopc)
	if db.compactCancel != nil {
		db.compactCancel()
	}
	<-db.donec

	db.mtx.Lock()
	defer db.mtx.Unlock()

	var g errgroup.Group

	// blocks also contains all head blocks.
	for _, pb := range db.blocks {
		g.Go(pb.Close)
	}

	errs := tsdb_errors.NewMulti(g.Wait(), db.locker.Release())
	if db.head != nil {
		errs.Add(db.head.Close())
	}
	return errs.Err()
}

// DisableCompactions disables auto compactions.
func (db *DB) DisableCompactions() {
	db.autoCompactMtx.Lock()
	defer db.autoCompactMtx.Unlock()

	db.autoCompact = false
	db.logger.Info("Compactions disabled")
}

// EnableCompactions enables auto compactions.
func (db *DB) EnableCompactions() {
	db.autoCompactMtx.Lock()
	defer db.autoCompactMtx.Unlock()

	db.autoCompact = true
	db.logger.Info("Compactions enabled")
}

// generateCompactionDelay returns a random duration in
// [0, chunkRange * CompactionDelayMaxPercent / 100) milliseconds.
func (db *DB) generateCompactionDelay() time.Duration {
	return time.Duration(rand.Int63n(db.head.chunkRange.Load()*int64(db.opts.CompactionDelayMaxPercent)/100)) * time.Millisecond
}
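
// For example (made-up values): with a 2h chunk range (7200000 ms) and
// CompactionDelayMaxPercent = 10, the delay is drawn uniformly from
// [0, 720000) milliseconds, i.e. up to 12 minutes, spreading compaction start
// times across restarting instances.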

// ForceHeadMMap is intended for use only in tests and benchmarks.
func (db *DB) ForceHeadMMap() {
	db.head.mmapHeadChunks()
}

// Snapshot writes the current data to the directory. If withHead is set to true it
// will create a new block containing all data that's currently in the memory buffer/WAL.
func (db *DB) Snapshot(dir string, withHead bool) error {
	if dir == db.dir {
		return errors.New("cannot snapshot into base directory")
	}
	if _, err := ulid.ParseStrict(dir); err == nil {
		return errors.New("dir must not be a valid ULID")
	}

	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		db.logger.Info("Snapshotting block", "block", b)

		if err := b.Snapshot(dir); err != nil {
			return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err)
		}
	}
	if !withHead {
		return nil
	}

	mint := db.head.MinTime()
	maxt := db.head.MaxTime()
	head := NewRangeHead(db.head, mint, maxt)
	// Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime).
	// Because of this, a block's interval is always 1ms longer than the range of samples it includes.
	if _, err := db.compactor.Write(dir, head, mint, maxt+1, nil); err != nil {
		return fmt.Errorf("snapshot head block: %w", err)
	}
	return nil
}
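
// Illustrative usage sketch (the target directory name is an assumption; it
// must differ from the DB's own dir and must not itself parse as a ULID):
//
//	if err := db.Snapshot(filepath.Join(backupRoot, "snap-20240101"), true); err != nil {
//		return fmt.Errorf("snapshot: %w", err)
//	}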

// Querier returns a new querier over the data partition for the given time range.
func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
	var blocks []BlockReader

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			blocks = append(blocks, b)
		}
	}

	blockQueriers := make([]storage.Querier, 0, len(blocks)+1) // +1 to allow for possible head querier.

	defer func() {
		if err != nil {
			// If we fail, all previously opened queriers must be closed.
			for _, q := range blockQueriers {
				// TODO(bwplotka): Handle error.
				_ = q.Close()
			}
		}
	}()

	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
	var headQuerier storage.Querier
	inoMint := max(db.head.MinTime(), mint)
	if maxt >= db.head.MinTime() || overlapsOOO {
		rh := NewRangeHead(db.head, mint, maxt)
		var err error
		headQuerier, err = db.blockQuerierFunc(rh, mint, maxt)
		if err != nil {
			return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
		}

		// Getting the querier above registers itself in the queue that the truncation waits on.
		// So if the querier is currently not colliding with any truncation, we can continue to use it and still
		// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
		shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
		if shouldClose {
			if err := headQuerier.Close(); err != nil {
				return nil, fmt.Errorf("closing head block querier %s: %w", rh, err)
			}
			headQuerier = nil
		}
		if getNew {
			rh := NewRangeHead(db.head, newMint, maxt)
			headQuerier, err = db.blockQuerierFunc(rh, newMint, maxt)
			if err != nil {
				return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err)
			}
			inoMint = newMint
		}
	}

	if overlapsOOO {
		// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
		isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
		headQuerier = NewHeadAndOOOQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier)
	}

	if headQuerier != nil {
		blockQueriers = append(blockQueriers, headQuerier)
	}

	for _, b := range blocks {
		q, err := db.blockQuerierFunc(b, mint, maxt)
		if err != nil {
			return nil, fmt.Errorf("open querier for block %s: %w", b, err)
		}
		blockQueriers = append(blockQueriers, q)
	}

	return storage.NewMergeQuerier(blockQueriers, nil, storage.ChainedSeriesMerge), nil
}
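
// Illustrative read-path sketch, not prescribed usage; it assumes the variant
// of storage.Querier whose Select takes a context, and a caller-supplied ctx,
// mint and maxt:
//
//	q, err := db.Querier(mint, maxt)
//	if err != nil {
//		return err
//	}
//	defer q.Close()
//	ss := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchEqual, "job", "api"))
//	for ss.Next() {
//		s := ss.At() // One series; iterate s.Iterator(nil) for its samples.
//		_ = s
//	}
//	return ss.Err()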

// blockChunkQuerierForRange returns individual block chunk queriers from the persistent blocks, in-order head block, and the
// out-of-order head block, overlapping with the given time range.
func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuerier, err error) {
	var blocks []BlockReader

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			blocks = append(blocks, b)
		}
	}

	blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+1) // +1 to allow for possible head querier.

	defer func() {
		if err != nil {
			// If we fail, all previously opened queriers must be closed.
			for _, q := range blockQueriers {
				// TODO(bwplotka): Handle error.
				_ = q.Close()
			}
		}
	}()

	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
	var headQuerier storage.ChunkQuerier
	inoMint := max(db.head.MinTime(), mint)
	if maxt >= db.head.MinTime() || overlapsOOO {
		rh := NewRangeHead(db.head, mint, maxt)
		headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt)
		if err != nil {
			return nil, fmt.Errorf("open querier for head %s: %w", rh, err)
		}

		// Getting the querier above registers itself in the queue that the truncation waits on.
		// So if the querier is currently not colliding with any truncation, we can continue to use it and still
		// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
		shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt)
		if shouldClose {
			if err := headQuerier.Close(); err != nil {
				return nil, fmt.Errorf("closing head querier %s: %w", rh, err)
			}
			headQuerier = nil
		}
		if getNew {
			rh := NewRangeHead(db.head, newMint, maxt)
			headQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt)
			if err != nil {
				return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err)
			}
			inoMint = newMint
		}
	}

	if overlapsOOO {
		// We need to fetch from in-order and out-of-order chunks: wrap the headQuerier.
		isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef)
		headQuerier = NewHeadAndOOOChunkQuerier(inoMint, mint, maxt, db.head, isoState, headQuerier)
	}

	if headQuerier != nil {
		blockQueriers = append(blockQueriers, headQuerier)
	}

	for _, b := range blocks {
		q, err := db.blockChunkQuerierFunc(b, mint, maxt)
		if err != nil {
			return nil, fmt.Errorf("open querier for block %s: %w", b, err)
		}
		blockQueriers = append(blockQueriers, q)
	}

	return blockQueriers, nil
}

// ChunkQuerier returns a new chunk querier over the data partition for the given time range.
func (db *DB) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
	blockQueriers, err := db.blockChunkQuerierForRange(mint, maxt)
	if err != nil {
		return nil, err
	}
	return storage.NewMergeChunkQuerier(blockQueriers, nil, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil
}

func (db *DB) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
	return db.head.exemplars.ExemplarQuerier(ctx)
}

func rangeForTimestamp(t, width int64) (maxt int64) {
	return (t/width)*width + width
}
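
// Worked examples for the alignment above, with width = 7200000 (2h in ms):
// rangeForTimestamp(0, 7200000) == 7200000,
// rangeForTimestamp(7199999, 7200000) == 7200000, and
// rangeForTimestamp(7200000, 7200000) == 14400000; i.e. the returned maxt is
// the exclusive end of the width-aligned window containing t.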

// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
func (db *DB) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	var g errgroup.Group

	db.mtx.RLock()
	defer db.mtx.RUnlock()

	for _, b := range db.blocks {
		if b.OverlapsClosedInterval(mint, maxt) {
			g.Go(func(b *Block) func() error {
				return func() error { return b.Delete(ctx, mint, maxt, ms...) }
			}(b))
		}
	}
	if db.head.OverlapsClosedInterval(mint, maxt) {
		g.Go(func() error {
			return db.head.Delete(ctx, mint, maxt, ms...)
		})
	}

	return g.Wait()
}

// CleanTombstones re-writes any blocks with tombstones.
func (db *DB) CleanTombstones() (err error) {
	db.cmtx.Lock()
	defer db.cmtx.Unlock()

	start := time.Now()
	defer func() {
		db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds())
	}()

	cleanUpCompleted := false
	// Repeat cleanup until there are no tombstones left.
	for !cleanUpCompleted {
		cleanUpCompleted = true

		for _, pb := range db.Blocks() {
			uids, safeToDelete, cleanErr := pb.CleanTombstones(db.Dir(), db.compactor)
			if cleanErr != nil {
				return fmt.Errorf("clean tombstones: %s: %w", pb.Dir(), cleanErr)
			}
			if !safeToDelete {
				// There was nothing to clean.
				continue
			}

			// In case the tombstones of the old block cover the whole block,
			// there would be no resultant block to tell the parent.
			// The lock protects against race conditions when deleting blocks
			// during an already running reload.
			db.mtx.Lock()
			pb.meta.Compaction.Deletable = safeToDelete
			db.mtx.Unlock()
			cleanUpCompleted = false
			if err = db.reloadBlocks(); err == nil { // Will try to delete old block.
				// Successful reload will change the existing blocks.
				// We need to loop over the new set of blocks.
				break
			}

			// Delete new blocks if they were created.
			for _, uid := range uids {
				dir := filepath.Join(db.Dir(), uid.String())
				if err := os.RemoveAll(dir); err != nil {
					db.logger.Error("failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err)
				}
			}
			if err != nil {
				return fmt.Errorf("reload blocks: %w", err)
			}
			return nil
		}
	}
	return nil
}

func (db *DB) SetWriteNotified(wn wlog.WriteNotified) {
	db.writeNotified = wn
	// It's possible we already created the head struct, so we should also set the WN for that.
	db.head.writeNotified = wn
}

func isBlockDir(fi fs.DirEntry) bool {
	if !fi.IsDir() {
		return false
	}
	_, err := ulid.ParseStrict(fi.Name())
	return err == nil
}

// isTmpDir returns true if the given file-info describes a directory whose name
// has a tmp extension and carries a block ULID, a checkpoint prefix, or a chunk
// snapshot prefix.
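//
// For illustration (the tmp suffix constants are defined elsewhere in this
// package; ".tmp-for-deletion", ".tmp-for-creation" and the legacy ".tmp" are
// assumed here), names such as
//
//	01BKGV7JBM69T2G1BGBGM6KB12.tmp-for-creation
//	checkpoint.00000001.tmp
//
// count as tmp dirs, while a completed block dir like
// 01BKGV7JBM69T2G1BGBGM6KB12 does not.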
func isTmpDir(fi fs.DirEntry) bool {
	if !fi.IsDir() {
		return false
	}

	fn := fi.Name()
	ext := filepath.Ext(fn)
	if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy {
		if strings.HasPrefix(fn, "checkpoint.") {
			return true
		}
		if strings.HasPrefix(fn, chunkSnapshotPrefix) {
			return true
		}
		if _, err := ulid.ParseStrict(fn[:len(fn)-len(ext)]); err == nil {
			return true
		}
	}
	return false
}

func blockDirs(dir string) ([]string, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var dirs []string

	for _, f := range files {
		if isBlockDir(f) {
			dirs = append(dirs, filepath.Join(dir, f.Name()))
		}
	}
	return dirs, nil
}
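
// exponential doubles d and clamps the result to [minD, maxD]. A typical use is
// a retry backoff, e.g. (illustrative sketch; tryOnce is a hypothetical
// operation, not part of this package):
//
//	backoff := time.Second
//	for !tryOnce() {
//		time.Sleep(backoff)
//		backoff = exponential(backoff, time.Second, time.Minute)
//	}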
func exponential(d, minD, maxD time.Duration) time.Duration {
	d *= 2
	if d < minD {
		d = minD
	}
	if d > maxD {
		d = maxD
	}
	return d
}