Compact head block early

Let older head blocks be compacted once the newest one has samples at
50% of its total range. This allows the memory of the compacted blocks
to be released and garbage collected before a new head block gets
created. Thereby the number of concurrent head blocks is 1 or 2 instead
of 2 or 3, which reduces memory spikes.
pull/5805/head
Fabian Reinartz 8 years ago
parent a2948f3c5f
commit 3410559c1b
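
For orientation, here is a minimal, self-contained sketch of the fullness check this commit introduces. The struct and function names below are illustrative stand-ins rather than the tsdb API; only the 50% threshold and the (highTimestamp - minTime) / (maxTime - minTime) ratio mirror the change.

```go
package main

import "fmt"

// headInfo is a hypothetical stand-in for the bits of a head block that the
// new compaction trigger looks at: its time range and the highest sample
// timestamp inserted so far.
type headInfo struct {
	minTime, maxTime int64 // block time range
	highTimestamp    int64 // highest inserted sample timestamp
}

// fullness mirrors headFullness: the fraction of the block's time range that
// has already received samples.
func fullness(h headInfo) float64 {
	return float64(h.highTimestamp-h.minTime) / float64(h.maxTime-h.minTime)
}

func main() {
	newest := headInfo{minTime: 1000, maxTime: 3000, highTimestamp: 2100}
	f := fullness(newest) // (2100-1000)/(3000-1000) = 0.55
	// Older head blocks become eligible for compaction once the newest
	// head is at least 50% full.
	fmt.Printf("fullness=%.2f, compact older heads: %v\n", f, f >= 0.5)
}
```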

@@ -60,6 +60,11 @@ type Block interface {
type headBlock interface {
Block
Appendable
// ActiveWriters returns the number of currently active appenders.
ActiveWriters() int
// HighTimestamp returns the highest currently inserted timestamp.
HighTimestamp() int64
}
// Snapshottable defines an entity that can be backed up online.
@@ -71,9 +76,6 @@ type Snapshottable interface {
type Appendable interface {
// Appender returns a new Appender against an underlying store.
Appender() Appender
// Busy returns whether there are any currently active appenders.
Busy() bool
}
// Queryable defines an entity which provides a Querier.

db.go

@@ -305,6 +305,15 @@ func (db *DB) retentionCutoff() (bool, error) {
return retentionCutoff(db.dir, mint)
}
// headFullness returns the fraction of a block's time range up to which
// samples have already been inserted.
func headFullness(h headBlock) float64 {
m := h.Meta()
a := float64(h.HighTimestamp() - m.MinTime)
b := float64(m.MaxTime - m.MinTime)
return a / b
}
func (db *DB) compact() (changes bool, err error) {
db.cmtx.Lock()
defer db.cmtx.Unlock()
@@ -319,12 +328,14 @@ func (db *DB) compact() (changes bool, err error) {
// returning the lock to not block Appenders.
// Selected blocks are semantically ensured to not be written to afterwards
// by appendable().
if len(db.heads) > 2 {
for _, h := range db.heads[:len(db.heads)-2] {
if len(db.heads) > 1 {
f := headFullness(db.heads[len(db.heads)-1])
for _, h := range db.heads[:len(db.heads)-1] {
// Blocks that won't be appendable when instantiating a new appender
// might still have active appenders on them.
// Abort at the first one we encounter.
if h.Busy() {
if h.ActiveWriters() > 0 || f < 0.5 {
break
}
singles = append(singles, h)
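
The selection loop above can be read as: walk the older heads in order, stop at the first one that still has active appenders, and select nothing at all unless the newest head is at least half full. Below is a hedged, standalone sketch of that rule using a made-up oldHead type instead of the real headBlock interface; the real code folds the fullness check into the loop's break condition, which has the same effect.

```go
package main

import "fmt"

// oldHead is a hypothetical stand-in for an older head block: all the
// selection rule needs here is its number of active appenders.
type oldHead struct {
	name          string
	activeWriters int
}

// selectCompactable sketches the new selection rule in db.compact(): older
// heads (oldest first, newest head excluded) are selected for compaction only
// while they have no active appenders and the newest head is at least 50% full.
func selectCompactable(older []oldHead, newestFullness float64) []oldHead {
	var selected []oldHead
	for _, h := range older {
		// Abort at the first head that is still being written to, or if
		// the newest head has not yet crossed the 50% mark.
		if h.activeWriters > 0 || newestFullness < 0.5 {
			break
		}
		selected = append(selected, h)
	}
	return selected
}

func main() {
	older := []oldHead{{"h1", 0}, {"h2", 0}, {"h3", 2}}
	fmt.Println(selectCompactable(older, 0.55)) // [{h1 0} {h2 0}]
	fmt.Println(selectCompactable(older, 0.30)) // []
}
```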
@@ -849,6 +860,8 @@ func (db *DB) createHeadBlock(mint, maxt int64) (headBlock, error) {
return nil, err
}
db.logger.Log("msg", "created head block", "ulid", newHead.meta.ULID, "mint", mint, "maxt", maxt)
db.blocks = append(db.blocks, newHead) // TODO(fabxc): this is a race!
db.heads = append(db.heads, newHead)

@@ -57,6 +57,7 @@ type HeadBlock struct {
wal WAL
activeWriters uint64
highTimestamp int64
closed bool
// descs holds all chunk descs for the head block. Each chunk implicitly
@@ -389,9 +390,14 @@ func (h *HeadBlock) Appender() Appender {
return &headAppender{HeadBlock: h, samples: getHeadAppendBuffer()}
}
// Busy returns true if the block has open write transactions.
func (h *HeadBlock) Busy() bool {
return atomic.LoadUint64(&h.activeWriters) > 0
// ActiveWriters returns the number of currently active appenders.
func (h *HeadBlock) ActiveWriters() int {
return int(atomic.LoadUint64(&h.activeWriters))
}
// HighTimestamp returns the highest inserted sample timestamp.
func (h *HeadBlock) HighTimestamp() int64 {
return atomic.LoadInt64(&h.highTimestamp)
}
var headPool = sync.Pool{}
@@ -416,6 +422,7 @@ type headAppender struct {
newHashes map[uint64]uint64
samples []RefSample
highTimestamp int64
}
type hashedLabels struct {
@@ -513,6 +520,10 @@ func (a *headAppender) AddFast(ref string, t int64, v float64) error {
}
}
if t > a.highTimestamp {
a.highTimestamp = t
}
a.samples = append(a.samples, RefSample{
Ref: refn,
T: t,
@@ -593,6 +604,16 @@ func (a *headAppender) Commit() error {
atomic.AddUint64(&a.meta.Stats.NumSamples, total)
atomic.AddUint64(&a.meta.Stats.NumSeries, uint64(len(a.newSeries)))
for {
ht := a.HeadBlock.HighTimestamp()
if a.highTimestamp <= ht {
break
}
if atomic.CompareAndSwapInt64(&a.HeadBlock.highTimestamp, ht, a.highTimestamp) {
break
}
}
return nil
}
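
The loop at the end of Commit() is a lock-free "raise to maximum" update: it only ever advances the block's highTimestamp and retries when another appender moved it concurrently. A minimal standalone sketch of that pattern (the helper name is made up, not part of the tsdb code):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// raiseTimestamp advances *dst to ts if ts is higher, retrying the CAS when
// another goroutine updates *dst concurrently. This mirrors the loop in
// headAppender.Commit(); the function itself is illustrative.
func raiseTimestamp(dst *int64, ts int64) {
	for {
		cur := atomic.LoadInt64(dst)
		if ts <= cur {
			return // an equal or higher timestamp is already recorded
		}
		if atomic.CompareAndSwapInt64(dst, cur, ts) {
			return
		}
	}
}

func main() {
	var high int64
	var wg sync.WaitGroup
	for i := int64(1); i <= 100; i++ {
		wg.Add(1)
		go func(ts int64) {
			defer wg.Done()
			raiseTimestamp(&high, ts)
		}(i)
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&high)) // 100
}
```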

@@ -38,7 +38,7 @@ type Querier interface {
Close() error
}
// Series represents a single time series.
// Series exposes a single time series.
type Series interface {
// Labels returns the complete set of labels identifying the series.
Labels() labels.Labels
