// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package chunks

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
	"os"
	"path/filepath"
	"strconv"

	"github.com/pkg/errors"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/fileutil"
)

// Segment header fields constants.
const (
	// MagicChunks is 4 bytes at the head of a series file.
	MagicChunks = 0x85BD40DD
	// MagicChunksSize is the size in bytes of MagicChunks.
	MagicChunksSize          = 4
	chunksFormatV1           = 1
	ChunksFormatVersionSize  = 1
	segmentHeaderPaddingSize = 3
	// SegmentHeaderSize defines the total size of the header part.
	SegmentHeaderSize = MagicChunksSize + ChunksFormatVersionSize + segmentHeaderPaddingSize
)

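// A sketch of the resulting 8-byte segment header layout, derived from the
// constants above and cutSegmentFile below (the magic number is written
// big-endian, followed by the format version and zero padding):
//
//	┌────────────────┬──────────────────┬──────────────────┐
//	│ magic <4 byte> │ version <1 byte> │ padding <3 byte> │
//	└────────────────┴──────────────────┴──────────────────┘
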
// Chunk fields constants.
const (
	// MaxChunkLengthFieldSize defines the maximum size of the data length part.
	MaxChunkLengthFieldSize = binary.MaxVarintLen32
	// ChunkEncodingSize defines the size of the chunk encoding part.
	ChunkEncodingSize = 1
)

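// A sketch of how each chunk is laid out within a segment, derived from
// writeChunks and Reader.Chunk below (the CRC32 is computed over the
// encoding byte and the data):
//
//	┌───────────────┬───────────────────┬──────────────┬────────────────┐
//	│ len <uvarint> │ encoding <1 byte> │ data <bytes> │ CRC32 <4 byte> │
//	└───────────────┴───────────────────┴──────────────┴────────────────┘
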
// ChunkRef is a generic reference for reading chunk data. In Prometheus it
// is either a HeadChunkRef or BlockChunkRef, though other implementations
// may have their own reference types.
type ChunkRef uint64

// HeadSeriesRef refers to an in-memory series.
type HeadSeriesRef uint64

// HeadChunkRef packs a HeadSeriesRef and a HeadChunkID into a global 8-byte ID.
// The HeadSeriesRef and HeadChunkID may not exceed 5 and 3 bytes respectively.
type HeadChunkRef uint64

func NewHeadChunkRef(hsr HeadSeriesRef, chunkID HeadChunkID) HeadChunkRef {
	if hsr > (1<<40)-1 {
		panic("series ID exceeds 5 bytes")
	}
	if chunkID > (1<<24)-1 {
		panic("chunk ID exceeds 3 bytes")
	}
	return HeadChunkRef(uint64(hsr<<24) | uint64(chunkID))
}

func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) {
	return HeadSeriesRef(p >> 24), HeadChunkID(p<<40) >> 40
}

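// For illustration (values assumed, not part of the original code), packing
// and unpacking round-trip:
//
//	ref := NewHeadChunkRef(5, 3) // 5<<24 | 3 == 0x05000003
//	hsr, cid := ref.Unpack()     // hsr == 5, cid == 3
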
// HeadChunkID refers to a specific chunk in a series (memSeries) in the Head.
// Each memSeries has its own monotonically increasing number to refer to its chunks.
// If the HeadChunkID value is...
//   - memSeries.firstChunkID+len(memSeries.mmappedChunks), it's the head chunk.
//   - less than the above, but >= memSeries.firstChunkID, then it's
//     memSeries.mmappedChunks[i] where i = HeadChunkID - memSeries.firstChunkID.
//
// Example:
// assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9].
//
// | HeadChunkID value | refers to ...                                                                           |
// |-------------------|-----------------------------------------------------------------------------------------|
// | 0-6               | chunks that have been compacted to blocks, these won't return data for queries in Head |
// | 7-11              | memSeries.mmappedChunks[i] where i is 0 to 4                                            |
// | 12                | memSeries.headChunk                                                                     |
type HeadChunkID uint64

// BlockChunkRef refers to a chunk within a persisted block.
// The upper 4 bytes are for the segment index and
// the lower 4 bytes are for the segment offset where the data starts for this chunk.
type BlockChunkRef uint64

// NewBlockChunkRef packs the file index and byte offset into a BlockChunkRef.
func NewBlockChunkRef(fileIndex, fileOffset uint64) BlockChunkRef {
	return BlockChunkRef(fileIndex<<32 | fileOffset)
}

func (b BlockChunkRef) Unpack() (int, int) {
	sgmIndex := int(b >> 32)
	chkStart := int((b << 32) >> 32)
	return sgmIndex, chkStart
}

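// For illustration (values assumed): a chunk starting at byte offset 1000 of
// segment file 2 round-trips as:
//
//	ref := NewBlockChunkRef(2, 1000) // 2<<32 | 1000 == 0x00000002000003e8
//	seq, off := ref.Unpack()         // seq == 2, off == 1000
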
// Meta holds information about a chunk of data.
type Meta struct {
	// Ref and Chunk hold either a reference that can be used to retrieve
	// chunk data or the data itself.
	// If Chunk is nil, call ChunkReader.Chunk(Meta.Ref) to get the chunk and assign it to the Chunk field.
	Ref   ChunkRef
	Chunk chunkenc.Chunk

	// Time range the data covers.
	// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
	MinTime, MaxTime int64
}

// Iterator iterates over the chunks of a single time series.
type Iterator interface {
	// At returns the current meta.
	// It depends on the implementation whether the chunk is populated or not.
	At() Meta
	// Next advances the iterator by one.
	Next() bool
	// Err returns an optional error if Next is false.
	Err() error
}

// writeHash writes the chunk encoding and raw data into the provided hash.
func (cm *Meta) writeHash(h hash.Hash, buf []byte) error {
	buf = append(buf[:0], byte(cm.Chunk.Encoding()))
	if _, err := h.Write(buf[:1]); err != nil {
		return err
	}
	if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
		return err
	}
	return nil
}

// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt].
func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
	// The chunk itself is a closed interval [cm.MinTime, cm.MaxTime].
	return cm.MinTime <= maxt && mint <= cm.MaxTime
}

var errInvalidSize = fmt.Errorf("invalid size")

var castagnoliTable *crc32.Table

func init() {
	castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
}

// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the
// polynomial may be easily changed in one location at a later time, if necessary.
func newCRC32() hash.Hash32 {
	return crc32.New(castagnoliTable)
}

// checkCRC32 checks that the CRC32 of data matches the checksum stored in sum,
// which was computed when the chunk was stored.
func checkCRC32(data, sum []byte) error {
	got := crc32.Checksum(data, castagnoliTable)
	// This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32,
	// i.e. it reads sum as a big-endian uint32.
	want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3])
	if got != want {
		return errors.Errorf("checksum mismatch expected:%x, actual:%x", want, got)
	}
	return nil
}

// Writer implements the ChunkWriter interface for the standard
// serialization format.
type Writer struct {
	dirFile *os.File
	files   []*os.File
	wbuf    *bufio.Writer
	n       int64
	crc32   hash.Hash
	buf     [binary.MaxVarintLen32]byte

	segmentSize int64
}

const (
	// DefaultChunkSegmentSize is the default chunks segment size.
	DefaultChunkSegmentSize = 512 * 1024 * 1024
)

// NewWriterWithSegSize returns a new writer against the given directory
// and allows setting a custom size for the segments.
func NewWriterWithSegSize(dir string, segmentSize int64) (*Writer, error) {
	return newWriter(dir, segmentSize)
}

// NewWriter returns a new writer against the given directory
// using the default segment size.
func NewWriter(dir string) (*Writer, error) {
	return newWriter(dir, DefaultChunkSegmentSize)
}

func newWriter(dir string, segmentSize int64) (*Writer, error) {
	if segmentSize <= 0 {
		segmentSize = DefaultChunkSegmentSize
	}
	if err := os.MkdirAll(dir, 0o777); err != nil {
		return nil, err
	}
	dirFile, err := fileutil.OpenDir(dir)
	if err != nil {
		return nil, err
	}
	return &Writer{
		dirFile:     dirFile,
		n:           0,
		crc32:       newCRC32(),
		segmentSize: segmentSize,
	}, nil
}

func (w *Writer) tail() *os.File {
	if len(w.files) == 0 {
		return nil
	}
	return w.files[len(w.files)-1]
}

// finalizeTail writes all pending data to the current tail file,
// truncates its size, and closes it.
func (w *Writer) finalizeTail() error {
	tf := w.tail()
	if tf == nil {
		return nil
	}
	if err := w.wbuf.Flush(); err != nil {
		return err
	}
	if err := tf.Sync(); err != nil {
		return err
	}
	// As the file was pre-allocated, we truncate any superfluous zero bytes.
	off, err := tf.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if err := tf.Truncate(off); err != nil {
		return err
	}

	return tf.Close()
}

func (w *Writer) cut() error {
	// Sync current tail to disk and close.
	if err := w.finalizeTail(); err != nil {
		return err
	}

	n, f, _, err := cutSegmentFile(w.dirFile, MagicChunks, chunksFormatV1, w.segmentSize)
	if err != nil {
		return err
	}

	w.n = int64(n)

	w.files = append(w.files, f)
	if w.wbuf != nil {
		w.wbuf.Reset(f)
	} else {
		w.wbuf = bufio.NewWriterSize(f, 8*1024*1024)
	}

	return nil
}

func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) {
	p, seq, err := nextSequenceFile(dirFile.Name())
	if err != nil {
		return 0, nil, 0, errors.Wrap(err, "next sequence file")
	}
	ptmp := p + ".tmp"
	f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666)
	if err != nil {
		return 0, nil, 0, errors.Wrap(err, "open temp file")
	}
	defer func() {
		if returnErr != nil {
			errs := tsdb_errors.NewMulti(returnErr)
			if f != nil {
				errs.Add(f.Close())
			}
			// Calling RemoveAll on a non-existent file does not return an error.
			errs.Add(os.RemoveAll(ptmp))
			returnErr = errs.Err()
		}
	}()
	if allocSize > 0 {
		if err = fileutil.Preallocate(f, allocSize, true); err != nil {
			return 0, nil, 0, errors.Wrap(err, "preallocate")
		}
	}
	if err = dirFile.Sync(); err != nil {
		return 0, nil, 0, errors.Wrap(err, "sync directory")
	}

	// Write header metadata for the new file.
	metab := make([]byte, SegmentHeaderSize)
	binary.BigEndian.PutUint32(metab[:MagicChunksSize], magicNumber)
	metab[4] = chunksFormat

	n, err := f.Write(metab)
	if err != nil {
		return 0, nil, 0, errors.Wrap(err, "write header")
	}
	if err := f.Close(); err != nil {
		return 0, nil, 0, errors.Wrap(err, "close temp file")
	}
	f = nil

	if err := fileutil.Rename(ptmp, p); err != nil {
		return 0, nil, 0, errors.Wrap(err, "replace file")
	}

	f, err = os.OpenFile(p, os.O_WRONLY, 0o666)
	if err != nil {
		return 0, nil, 0, errors.Wrap(err, "open final file")
	}
	// Skip header for further writes.
	if _, err := f.Seek(int64(n), 0); err != nil {
		return 0, nil, 0, errors.Wrap(err, "seek in final file")
	}
	return n, f, seq, nil
}

func (w *Writer) write(b []byte) error {
	n, err := w.wbuf.Write(b)
	w.n += int64(n)
	return err
}

// WriteChunks writes as many chunks as possible to the current segment,
// cuts a new segment when the current segment is full and
// writes the rest of the chunks in the new segment.
func (w *Writer) WriteChunks(chks ...Meta) error {
	var (
		batchSize  = int64(0)
		batchStart = 0
		batches    = make([][]Meta, 1)
		batchID    = 0
		firstBatch = true
	)

	for i, chk := range chks {
		// Each chunk contains: data length + encoding + the data itself + crc32.
		chkSize := int64(MaxChunkLengthFieldSize) // The data length is a variable length field so use the maximum possible value.
		chkSize += ChunkEncodingSize              // The chunk encoding.
		chkSize += int64(len(chk.Chunk.Bytes()))  // The data itself.
		chkSize += crc32.Size                     // The 4 bytes of crc32.
		batchSize += chkSize

		// Cut a new batch when it is not the first chunk (to avoid empty segments) and
		// the batch is too large to fit in the current segment.
		cutNewBatch := (i != 0) && (batchSize+SegmentHeaderSize > w.segmentSize)

		// If the segment already has some data, then
		// the first batch size calculation should account for that.
		if firstBatch && w.n > SegmentHeaderSize {
			cutNewBatch = batchSize+w.n > w.segmentSize
			if cutNewBatch {
				firstBatch = false
			}
		}

		if cutNewBatch {
			batchStart = i
			batches = append(batches, []Meta{})
			batchID++
			batchSize = chkSize
		}
		batches[batchID] = chks[batchStart : i+1]
	}

	// Create a new segment when one doesn't already exist.
	if w.n == 0 {
		if err := w.cut(); err != nil {
			return err
		}
	}

	for i, chks := range batches {
		if err := w.writeChunks(chks); err != nil {
			return err
		}
		// Cut a new segment only when there are more chunks to write.
		// Avoid creating a new empty segment at the end of the write.
		if i < len(batches)-1 {
			if err := w.cut(); err != nil {
				return err
			}
		}
	}
	return nil
}

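// For illustration (sizes assumed, not from the original code): with
// segmentSize = 100 bytes and three chunks whose estimated sizes are 40 bytes
// each, the first two chunks form one batch (8-byte header + 80 bytes fits),
// while the third chunk would exceed the limit and is cut into a new segment.
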
// writeChunks writes the chunks into the current segment irrespective
// of the configured segment size limit. A segment should have been already
// started before calling this.
func (w *Writer) writeChunks(chks []Meta) error {
	if len(chks) == 0 {
		return nil
	}

	seq := uint64(w.seq())
	for i := range chks {
		chk := &chks[i]

		chk.Ref = ChunkRef(NewBlockChunkRef(seq, uint64(w.n)))

		n := binary.PutUvarint(w.buf[:], uint64(len(chk.Chunk.Bytes())))

		if err := w.write(w.buf[:n]); err != nil {
			return err
		}
		w.buf[0] = byte(chk.Chunk.Encoding())
		if err := w.write(w.buf[:1]); err != nil {
			return err
		}
		if err := w.write(chk.Chunk.Bytes()); err != nil {
			return err
		}

		w.crc32.Reset()
		if err := chk.writeHash(w.crc32, w.buf[:]); err != nil {
			return err
		}
		if err := w.write(w.crc32.Sum(w.buf[:0])); err != nil {
			return err
		}
	}
	return nil
}

func (w *Writer) seq() int {
	return len(w.files) - 1
}

func (w *Writer) Close() error {
	if err := w.finalizeTail(); err != nil {
		return err
	}

	// Close the dir file; if it stays open, renames will fail on Windows.
	return w.dirFile.Close()
}

// ByteSlice abstracts a byte slice.
type ByteSlice interface {
	Len() int
	Range(start, end int) []byte
}

type realByteSlice []byte

func (b realByteSlice) Len() int {
	return len(b)
}

func (b realByteSlice) Range(start, end int) []byte {
	return b[start:end]
}

// Reader implements a ChunkReader for a serialized byte stream
// of series data.
type Reader struct {
	// The underlying bytes holding the encoded series data.
	// Each slice holds the data for a different segment.
	bs   []ByteSlice
	cs   []io.Closer // Closers for resources behind the byte slices.
	size int64       // The total size of bytes in the reader.
	pool chunkenc.Pool
}

func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) {
	cr := Reader{pool: pool, bs: bs, cs: cs}
	for i, b := range cr.bs {
		if b.Len() < SegmentHeaderSize {
			return nil, errors.Wrapf(errInvalidSize, "invalid segment header in segment %d", i)
		}
		// Verify magic number.
		if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks {
			return nil, errors.Errorf("invalid magic number %x", m)
		}

		// Verify chunk format version.
		if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 {
			return nil, errors.Errorf("invalid chunk format version %d", v)
		}
		cr.size += int64(b.Len())
	}
	return &cr, nil
}

// NewDirReader returns a new Reader against sequentially numbered files in the
// given directory.
func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) {
	files, err := sequenceFiles(dir)
	if err != nil {
		return nil, err
	}
	if pool == nil {
		pool = chunkenc.NewPool()
	}

	var (
		bs []ByteSlice
		cs []io.Closer
	)
	for _, fn := range files {
		f, err := fileutil.OpenMmapFile(fn)
		if err != nil {
			return nil, tsdb_errors.NewMulti(
				errors.Wrap(err, "mmap files"),
				tsdb_errors.CloseAll(cs),
			).Err()
		}
		cs = append(cs, f)
		bs = append(bs, realByteSlice(f.Bytes()))
	}

	reader, err := newReader(bs, cs, pool)
	if err != nil {
		return nil, tsdb_errors.NewMulti(
			err,
			tsdb_errors.CloseAll(cs),
		).Err()
	}
	return reader, nil
}

func (s *Reader) Close() error {
	return tsdb_errors.CloseAll(s.cs)
}

// Size returns the size of the chunks.
func (s *Reader) Size() int64 {
	return s.size
}

// Chunk returns a chunk from a given reference.
func (s *Reader) Chunk(ref ChunkRef) (chunkenc.Chunk, error) {
	sgmIndex, chkStart := BlockChunkRef(ref).Unpack()

	if sgmIndex >= len(s.bs) {
		return nil, errors.Errorf("segment index %d out of range", sgmIndex)
	}

	sgmBytes := s.bs[sgmIndex]

	if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() {
		return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len())
	}
	// With the minimum chunk length this should never cause us reading
	// over the end of the slice.
	c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize)
	chkDataLen, n := binary.Uvarint(c)
	if n <= 0 {
		return nil, errors.Errorf("reading chunk length failed with %d", n)
	}

	chkEncStart := chkStart + n
	chkEnd := chkEncStart + ChunkEncodingSize + int(chkDataLen) + crc32.Size
	chkDataStart := chkEncStart + ChunkEncodingSize
	chkDataEnd := chkEnd - crc32.Size

	if chkEnd > sgmBytes.Len() {
		return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len())
	}

	sum := sgmBytes.Range(chkDataEnd, chkEnd)
	if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil {
		return nil, err
	}

	chkData := sgmBytes.Range(chkDataStart, chkDataEnd)
	chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0]
	return s.pool.Get(chunkenc.Encoding(chkEnc), chkData)
}

func nextSequenceFile(dir string) (string, int, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return "", 0, err
	}

	i := uint64(0)
	for _, f := range files {
		j, err := strconv.ParseUint(f.Name(), 10, 64)
		if err != nil {
			continue
		}
		// It is not necessary that we find the files in number order,
		// for example with '1000000' and '200000', '1000000' would come first.
		// Though this is a very rare case, we check anyway for the max id.
		if j > i {
			i = j
		}
	}
	return segmentFile(dir, int(i+1)), int(i + 1), nil
}

func segmentFile(baseDir string, index int) string {
	return filepath.Join(baseDir, fmt.Sprintf("%0.6d", index))
}

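// For illustration (path assumed): segmentFile("/data/chunks", 42) returns
// "/data/chunks/000042", since "%0.6d" zero-pads the sequence number to six
// digits.
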
func sequenceFiles(dir string) ([]string, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var res []string
	for _, fi := range files {
		if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
			continue
		}
		res = append(res, filepath.Join(dir, fi.Name()))
	}
	return res, nil
}