Browse Source

Nits after PR 10051 merge (#10159)

Signed-off-by: Marco Pracucci <marco@pracucci.com>

Co-authored-by: Marco Pracucci <marco@pracucci.com>
pull/10182/head
Mauro Stettler 3 years ago committed by GitHub
parent
commit
bf959b36cb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 2
      tsdb/chunks/chunk_write_queue.go
  2. 10
      tsdb/chunks/head_chunks.go

2
tsdb/chunks/chunk_write_queue.go

@@ -34,7 +34,7 @@ type chunkWriteJob struct {
 // chunkWriteQueue is a queue for writing chunks to disk in a non-blocking fashion.
 // Chunks that shall be written get added to the queue, which is consumed asynchronously.
-// Adding jobs to the job is non-blocking as long as the queue isn't full.
+// Adding jobs to the queue is non-blocking as long as the queue isn't full.
 type chunkWriteQueue struct {
 	jobs chan chunkWriteJob

10
tsdb/chunks/head_chunks.go

@@ -61,7 +61,7 @@ const (
 	CRCSize = 4
 	// MaxHeadChunkMetaSize is the max size of an mmapped chunks minus the chunks data.
 	// Max because the uvarint size can be smaller.
-	MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunksFormatVersionSize + MaxChunkLengthFieldSize + CRCSize
+	MaxHeadChunkMetaSize = SeriesRefSize + 2*MintMaxtSize + ChunkEncodingSize + MaxChunkLengthFieldSize + CRCSize
 	// MinWriteBufferSize is the minimum write buffer size allowed.
 	MinWriteBufferSize = 64 * 1024 // 64KB.
 	// MaxWriteBufferSize is the maximum write buffer size allowed.
@@ -113,7 +113,7 @@ func (f *chunkPos) getNextChunkRef(chk chunkenc.Chunk) (chkRef ChunkDiskMapperRef, cutFile bool) {
 	chkLen := uint64(len(chk.Bytes()))
 	bytesToWrite := f.bytesToWriteForChunk(chkLen)
-	if f.shouldCutNewFile(chkLen) {
+	if f.shouldCutNewFile(bytesToWrite) {
 		f.toNewFile()
 		f.cutFile = false
 		cutFile = true
@@ -144,14 +144,14 @@ func (f *chunkPos) initSeq(seq uint64) {
 }

 // shouldCutNewFile returns whether a new file should be cut based on the file size.
-// The read or write lock on chunkPos must be held when calling this.
-func (f *chunkPos) shouldCutNewFile(chunkSize uint64) bool {
+// Not thread safe, a lock must be held when calling this.
+func (f *chunkPos) shouldCutNewFile(bytesToWrite uint64) bool {
 	if f.cutFile {
 		return true
 	}

 	return f.offset == 0 || // First head chunk file.
-		f.offset+chunkSize+MaxHeadChunkMetaSize > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
+		f.offset+bytesToWrite > MaxHeadChunkFileSize // Exceeds the max head chunk file size.
 }

 // bytesToWriteForChunk returns the number of bytes that will need to be written for the given chunk size,

Loading…
Cancel
Save