@@ -70,19 +70,20 @@ var ErrNotReady = errors.New("TSDB not ready")
 // millisecond precision timestamps.
 func DefaultOptions() *Options {
 	return &Options{
-		WALSegmentSize:            wlog.DefaultSegmentSize,
-		MaxBlockChunkSegmentSize:  chunks.DefaultChunkSegmentSize,
-		RetentionDuration:         int64(15 * 24 * time.Hour / time.Millisecond),
-		MinBlockDuration:          DefaultBlockDuration,
-		MaxBlockDuration:          DefaultBlockDuration,
-		NoLockfile:                false,
-		SamplesPerChunk:           DefaultSamplesPerChunk,
-		WALCompression:            wlog.CompressionNone,
-		StripeSize:                DefaultStripeSize,
-		HeadChunksWriteBufferSize: chunks.DefaultWriteBufferSize,
-		IsolationDisabled:         defaultIsolationDisabled,
-		HeadChunksWriteQueueSize:  chunks.DefaultWriteQueueSize,
-		OutOfOrderCapMax:          DefaultOutOfOrderCapMax,
+		WALSegmentSize:              wlog.DefaultSegmentSize,
+		MaxBlockChunkSegmentSize:    chunks.DefaultChunkSegmentSize,
+		RetentionDuration:           int64(15 * 24 * time.Hour / time.Millisecond),
+		MinBlockDuration:            DefaultBlockDuration,
+		MaxBlockDuration:            DefaultBlockDuration,
+		NoLockfile:                  false,
+		SamplesPerChunk:             DefaultSamplesPerChunk,
+		WALCompression:              wlog.CompressionNone,
+		StripeSize:                  DefaultStripeSize,
+		HeadChunksWriteBufferSize:   chunks.DefaultWriteBufferSize,
+		IsolationDisabled:           defaultIsolationDisabled,
+		HeadChunksWriteQueueSize:    chunks.DefaultWriteQueueSize,
+		OutOfOrderCapMax:            DefaultOutOfOrderCapMax,
+		EnableOverlappingCompaction: true,
 	}
 }
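Note on the duration defaults above: RetentionDuration, MinBlockDuration, and MaxBlockDuration are plain int64 millisecond counts, not time.Duration values, which is why the 15-day retention default is written as `int64(15 * 24 * time.Hour / time.Millisecond)`. A minimal sketch of that conversion, with an illustrative helper name that is not part of the package:

```go
package main

import (
	"fmt"
	"time"
)

// durToMillis converts a time.Duration into the int64 millisecond count
// that the TSDB Options fields expect (hypothetical helper).
func durToMillis(d time.Duration) int64 {
	return int64(d / time.Millisecond)
}

func main() {
	// 15 days, matching DefaultOptions: 15 * 24h = 1,296,000 s = 1,296,000,000 ms.
	fmt.Println(durToMillis(15 * 24 * time.Hour)) // 1296000000
}
```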
@@ -177,6 +178,14 @@ type Options struct {
 	// OutOfOrderCapMax is maximum capacity for OOO chunks (in samples).
 	// If it is <=0, the default value is assumed.
 	OutOfOrderCapMax int64
+
+	// Compaction of overlapping blocks is allowed if EnableOverlappingCompaction is true.
+	// This is an optional flag for overlapping blocks.
+	// This flag exists because there are various users of the TSDB
+	// that do not want vertical compaction to happen at ingest time. Instead,
+	// they'd rather keep overlapping blocks and let another component do the overlapping compaction later.
+	// For Prometheus, this will always be true.
+	EnableOverlappingCompaction bool
 }

 type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{}
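For context on how a downstream embedder would use the new field: a component that defers overlapping (vertical) compaction to an external process can flip the flag before opening the DB. A minimal sketch, assuming the exported tsdb.Open / DefaultOptions / NewDBStats API at this revision; the data directory, nop logger, and fresh registry are placeholders:

```go
package main

import (
	kitlog "github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	opts := tsdb.DefaultOptions()
	// Keep overlapping blocks on disk; an external component is expected to
	// compact them later, so skip vertical compaction at ingest time.
	opts.EnableOverlappingCompaction = false

	db, err := tsdb.Open("./data", kitlog.NewNopLogger(), prometheus.NewRegistry(), opts, tsdb.NewDBStats())
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```

Prometheus itself keeps the previous behavior, since DefaultOptions() sets the flag to true.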
@@ -816,7 +825,10 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 	}

 	ctx, cancel := context.WithCancel(context.Background())
-	db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil)
+	db.compactor, err = NewLeveledCompactorWithOptions(ctx, r, l, rngs, db.chunkPool, LeveledCompactorOptions{
+		MaxBlockChunkSegmentSize:    opts.MaxBlockChunkSegmentSize,
+		EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
+	})
 	if err != nil {
 		cancel()
 		return nil, fmt.Errorf("create leveled compactor: %w", err)
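The options-based constructor used by open() is exported, so components that build their own compactor can pass the same options directly. A minimal sketch mirroring the call above with placeholder inputs (a single 2h block range, a nop logger, a fresh registry); it assumes only the constructor signature visible in this hunk, and the option values chosen here are illustrative:

```go
package main

import (
	"context"

	kitlog "github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

func main() {
	// Build a leveled compactor directly, mirroring the call site in open().
	compactor, err := tsdb.NewLeveledCompactorWithOptions(
		context.Background(),
		prometheus.NewRegistry(),
		kitlog.NewNopLogger(),
		[]int64{2 * 60 * 60 * 1000}, // block ranges in milliseconds (2h)
		chunkenc.NewPool(),
		tsdb.LeveledCompactorOptions{
			MaxBlockChunkSegmentSize:    chunks.DefaultChunkSegmentSize,
			EnableOverlappingCompaction: false, // another component will compact overlapping blocks
		},
	)
	if err != nil {
		panic(err)
	}
	_ = compactor
}
```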