
Reduce memSeries memory usage by decoupling metadata (#11152)

Metadata was added recently but doesn't seem to be used much, at least as far as I could identify.
Yet it's part of the memSeries struct, so even when empty it takes 48 bytes,
which is a lot given that memSeries requires 224 bytes without it.
This change turns it into a pointer on the struct that gets set only when metadata is actually set for a given series.
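To illustrate the saving, here is a standalone sketch (not code from this change; the Metadata type below merely mirrors the three string-sized fields of metadata.Metadata, and the series wrappers are illustrative, not the real memSeries layout):

package main

import (
	"fmt"
	"unsafe"
)

// Metadata mirrors the shape of metadata.Metadata: three string-sized
// fields, each a 16-byte string header on 64-bit platforms.
type Metadata struct {
	Type string
	Unit string
	Help string
}

// seriesInline embeds the metadata by value: every series pays
// 48 bytes whether or not metadata was ever set.
type seriesInline struct {
	meta Metadata
}

// seriesDecoupled holds only a pointer: a series without metadata
// pays 8 bytes, and the 48 bytes are allocated only on demand.
type seriesDecoupled struct {
	meta *Metadata
}

func main() {
	fmt.Println(unsafe.Sizeof(seriesInline{}))    // 48
	fmt.Println(unsafe.Sizeof(seriesDecoupled{})) // 8
}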

Signed-off-by: Łukasz Mierzwa <l.mierzwa@gmail.com>

pull/11180/head
Łukasz Mierzwa, 2 years ago, committed by GitHub
commit 3196c98bc2
  1. tsdb/db_test.go (40 lines changed)
  2. tsdb/head.go (6 lines changed)
  3. tsdb/head_append.go (4 lines changed)
  4. tsdb/head_wal.go (2 lines changed)

tsdb/db_test.go (40 lines changed)

@@ -1821,10 +1821,10 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
 }
 
 // TestInitializeHeadTimestamp ensures that the h.minTime is set properly.
-// - no blocks no WAL: set to the time of the first appended sample
-// - no blocks with WAL: set to the smallest sample from the WAL
-// - with blocks no WAL: set to the last block maxT
-// - with blocks with WAL: same as above
+//   - no blocks no WAL: set to the time of the first appended sample
+//   - no blocks with WAL: set to the smallest sample from the WAL
+//   - with blocks no WAL: set to the last block maxT
+//   - with blocks with WAL: same as above
 func TestInitializeHeadTimestamp(t *testing.T) {
 	t.Run("clean", func(t *testing.T) {
 		dir := t.TempDir()
@@ -2166,10 +2166,12 @@ func TestCorrectNumTombstones(t *testing.T) {
 }
 
 // TestBlockRanges checks the following use cases:
-// - No samples can be added with timestamps lower than the last block maxt.
-// - The compactor doesn't create overlapping blocks
+//   - No samples can be added with timestamps lower than the last block maxt.
+//   - The compactor doesn't create overlapping blocks
+//
 // even when the last blocks is not within the default boundaries.
-// - Lower boundary is based on the smallest sample in the head and
+//   - Lower boundary is based on the smallest sample in the head and
+//
 // upper boundary is rounded to the configured block range.
 //
 // This ensures that a snapshot that includes the head and creates a block with a custom time range
@@ -3697,10 +3699,10 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
 	series2 := db.head.series.getByHash(s2.Hash(), s2)
 	series3 := db.head.series.getByHash(s3.Hash(), s3)
 	series4 := db.head.series.getByHash(s4.Hash(), s4)
-	require.Equal(t, series1.meta, m1)
-	require.Equal(t, series2.meta, m2)
-	require.Equal(t, series3.meta, m3)
-	require.Equal(t, series4.meta, metadata.Metadata{})
+	require.Equal(t, *series1.meta, m1)
+	require.Equal(t, *series2.meta, m2)
+	require.Equal(t, *series3.meta, m3)
+	require.Nil(t, series4.meta)
 
 	// Add a replicated metadata entry to the first series,
 	// a changed metadata entry to the second series,
@@ -3718,10 +3720,10 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
 	series2 = db.head.series.getByHash(s2.Hash(), s2)
 	series3 = db.head.series.getByHash(s3.Hash(), s3)
 	series4 = db.head.series.getByHash(s4.Hash(), s4)
-	require.Equal(t, series1.meta, m1)
-	require.Equal(t, series2.meta, m5)
-	require.Equal(t, series3.meta, m3)
-	require.Equal(t, series4.meta, m4)
+	require.Equal(t, *series1.meta, m1)
+	require.Equal(t, *series2.meta, m5)
+	require.Equal(t, *series3.meta, m3)
+	require.Equal(t, *series4.meta, m4)
 
 	require.NoError(t, db.Close())
@@ -3736,8 +3738,8 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
 	_, err = reopenDB.head.wal.Size()
 	require.NoError(t, err)
 
-	require.Equal(t, reopenDB.head.series.getByHash(s1.Hash(), s1).meta, m1)
-	require.Equal(t, reopenDB.head.series.getByHash(s2.Hash(), s2).meta, m5)
-	require.Equal(t, reopenDB.head.series.getByHash(s3.Hash(), s3).meta, m3)
-	require.Equal(t, reopenDB.head.series.getByHash(s4.Hash(), s4).meta, m4)
+	require.Equal(t, *reopenDB.head.series.getByHash(s1.Hash(), s1).meta, m1)
+	require.Equal(t, *reopenDB.head.series.getByHash(s2.Hash(), s2).meta, m5)
+	require.Equal(t, *reopenDB.head.series.getByHash(s3.Hash(), s3).meta, m3)
+	require.Equal(t, *reopenDB.head.series.getByHash(s4.Hash(), s4).meta, m4)
 }

tsdb/head.go (6 lines changed)

@@ -1516,7 +1516,7 @@ type memSeries struct {
 	ref  chunks.HeadSeriesRef
 	lset labels.Labels
-	meta metadata.Metadata
+	meta *metadata.Metadata
 
 	// Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps.
 	// When compaction runs, chunks get moved into a block and all pointers are shifted like so:
@@ -1540,8 +1540,6 @@ type memSeries struct {
 	// Even the most compact encoding of a sample takes 2 bits, so the last byte is not contended.
 	sampleBuf [4]sample
 
-	pendingCommit bool // Whether there are samples waiting to be committed to this series.
-
 	// Current appender for the head chunk. Set when a new head chunk is cut.
 	// It is nil only if headChunk is nil. E.g. if there was an appender that created a new series, but rolled back the commit
 	// (the first sample would create a headChunk, hence appender, but rollback skipped it while the Append() call would create a series).
@@ -1551,6 +1549,8 @@ type memSeries struct {
 	// txs is nil if isolation is disabled.
 	txs *txRing
+
+	pendingCommit bool // Whether there are samples waiting to be committed to this series.
 }
 
 func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, chunkRange int64, memChunkPool *sync.Pool, isolationDisabled bool) *memSeries {
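
The hunks above also move pendingCommit from the middle of the struct to its end, plausibly to keep alignment padding down: in Go, a 1-byte bool wedged between 8-byte-aligned fields drags padding with it, while small fields grouped at the tail pack together. A toy illustration of that rule (hypothetical types, not the memSeries layout):

package main

import (
	"fmt"
	"unsafe"
)

// boolInMiddle: the bool at offset 0 is followed by 7 bytes of
// padding so the int64 can start at an 8-byte boundary, and the
// trailing bool forces 7 more bytes of tail padding.
type boolInMiddle struct {
	a bool
	b int64
	c bool
}

// boolsAtEnd: both bools share the tail of the struct, so only
// 6 bytes of padding are needed in total.
type boolsAtEnd struct {
	b int64
	a bool
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(boolInMiddle{})) // 24
	fmt.Println(unsafe.Sizeof(boolsAtEnd{}))   // 16
}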

tsdb/head_append.go (4 lines changed)

@@ -399,7 +399,7 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels,
 	}
 
 	s.RLock()
-	hasNewMetadata := s.meta != meta
+	hasNewMetadata := s.meta == nil || *s.meta != meta
 	s.RUnlock()
 
 	if hasNewMetadata {
@@ -540,7 +540,7 @@ func (a *headAppender) Commit() (err error) {
 	for i, m := range a.metadata {
 		series = a.metadataSeries[i]
 		series.Lock()
-		series.meta = metadata.Metadata{Type: record.ToTextparseMetricType(m.Type), Unit: m.Unit, Help: m.Help}
+		series.meta = &metadata.Metadata{Type: record.ToTextparseMetricType(m.Type), Unit: m.Unit, Help: m.Help}
 		series.Unlock()
 	}
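
One subtlety of the pointer field shows up in the UpdateMetadata hunk above: the old comparison s.meta != meta no longer compiles (pointer vs. value), and a plain dereference would panic for a series that never had metadata, hence the nil guard. A standalone sketch of the pattern (illustrative names, not the Prometheus API):

package main

import "fmt"

type Metadata struct{ Type, Unit, Help string }

// hasNewMetadata reports whether incoming differs from the stored
// value; a nil pointer means nothing is stored yet, so anything
// incoming counts as new.
func hasNewMetadata(stored *Metadata, incoming Metadata) bool {
	return stored == nil || *stored != incoming
}

func main() {
	m := Metadata{Type: "counter", Help: "Total requests."}
	fmt.Println(hasNewMetadata(nil, m)) // true: no metadata stored yet
	fmt.Println(hasNewMetadata(&m, m))  // false: unchanged
}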

tsdb/head_wal.go (2 lines changed)

@@ -333,7 +333,7 @@ Outer:
 			unknownMetadataRefs.Inc()
 			continue
 		}
-		s.meta = metadata.Metadata{
+		s.meta = &metadata.Metadata{
 			Type: record.ToTextparseMetricType(m.Type),
 			Unit: m.Unit,
 			Help: m.Help,
