mirror of https://github.com/prometheus/prometheus
Use separate lock for series creation
This uses the head block's own lock to only lock if new series were encountered. In the general append case we just need to hold a read lock.

branch: pull/5805/head
parent 63e12807da
commit 300f4e2abf
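Why this helps: series lookups vastly outnumber series creations, so the head block can serve the common append path under a read lock and take the write lock only for the occasional batch of new series. Below is a minimal sketch of that pattern, assuming simplified stand-in types (`head`, `sample`, and the string `descs` slice are illustrative, not the real tsdb structures):

```go
package sketch

import "sync"

type sample struct {
	hash uint64
	ref  uint32
}

type head struct {
	mtx    sync.RWMutex
	hashes map[uint64]uint32 // series hash -> reference into descs
	descs  []string          // stand-in for per-series chunk descriptors
}

func (h *head) append(samples []sample) {
	var newSeries []uint64

	// Common path: a read lock suffices to resolve existing series.
	h.mtx.RLock()
	for i := range samples {
		if ref, ok := h.hashes[samples[i].hash]; ok {
			samples[i].ref = ref
			continue
		}
		newSeries = append(newSeries, samples[i].hash)
	}
	h.mtx.RUnlock()

	// Rare path: take the write lock only if new series showed up.
	if len(newSeries) > 0 {
		h.mtx.Lock()
		for _, hash := range newSeries {
			// Re-check: another writer may have created the series in
			// the window between RUnlock and Lock.
			if _, ok := h.hashes[hash]; !ok {
				h.hashes[hash] = uint32(len(h.descs))
				h.descs = append(h.descs, "chunkDesc")
			}
		}
		h.mtx.Unlock()
	}
}
```

Because `sync.RWMutex` offers no atomic upgrade, another writer may act between `RUnlock` and `Lock`; the sketch re-checks the map under the write lock, whereas the commit below instead rebases the queued samples' references via `base := len(h.descs)` once the write lock is held.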
Benchmark setup (hunk from the writeBenchmark runner):

```diff
@@ -91,7 +91,7 @@ func (b *writeBenchmark) run(cmd *cobra.Command, args []string) {
 	dir := filepath.Join(b.outPath, "storage")

-	st, err := tsdb.OpenPartitioned(dir, 8, nil, nil)
+	st, err := tsdb.OpenPartitioned(dir, 1, nil, nil)
 	if err != nil {
 		exitWithError(err)
 	}
```
db.go (7 changed lines):

```diff
@@ -339,8 +339,8 @@ func (db *DB) appendBatch(samples []hashedSample) error {
 	if len(samples) == 0 {
 		return nil
 	}
-	db.mtx.Lock()
-	defer db.mtx.Unlock()
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()

 	head := db.heads[len(db.heads)-1]

@@ -426,6 +426,9 @@ func (db *DB) reinit(dir string) error {
 }

 func (db *DB) compactable() []block {
+	db.mtx.RLock()
+	defer db.mtx.RUnlock()
+
 	var blocks []block
 	for _, pb := range db.persisted {
 		blocks = append([]block{pb}, blocks...)
```
head.go (71 changed lines):

```diff
@@ -5,6 +5,7 @@ import (
 	"math"
 	"sort"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/bradfitz/slice"

@@ -100,6 +101,9 @@ func (h *HeadBlock) stats() BlockStats { return h.bstats }

 // Chunk returns the chunk for the reference number.
 func (h *HeadBlock) Chunk(ref uint32) (chunks.Chunk, error) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	if int(ref) >= len(h.descs) {
 		return nil, errNotFound
 	}

@@ -107,16 +111,25 @@ func (h *HeadBlock) Chunk(ref uint32) (chunks.Chunk, error) {
 }

 func (h *HeadBlock) interval() (int64, int64) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	return h.bstats.MinTime, h.bstats.MaxTime
 }

 // Stats returns statisitics about the indexed data.
 func (h *HeadBlock) Stats() (BlockStats, error) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	return h.bstats, nil
 }

 // LabelValues returns the possible label values
 func (h *HeadBlock) LabelValues(names ...string) (StringTuples, error) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	if len(names) != 1 {
 		return nil, errInvalidSize
 	}

@@ -132,11 +145,17 @@ func (h *HeadBlock) LabelValues(names ...string) (StringTuples, error) {

 // Postings returns the postings list iterator for the label pair.
 func (h *HeadBlock) Postings(name, value string) (Postings, error) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	return h.postings.get(term{name: name, value: value}), nil
 }

 // Series returns the series for the given reference.
 func (h *HeadBlock) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	if int(ref) >= len(h.descs) {
 		return nil, nil, errNotFound
 	}

@@ -151,6 +170,9 @@ func (h *HeadBlock) Series(ref uint32) (labels.Labels, []ChunkMeta, error) {
 }

 func (h *HeadBlock) LabelIndices() ([][]string, error) {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+
 	res := [][]string{}

 	for s := range h.values {

@@ -226,9 +248,11 @@ func (h *HeadBlock) appendBatch(samples []hashedSample) error {
 	// ones we haven't seen before.
 	var (
 		newSeries    []labels.Labels
+		newSamples   []*hashedSample
 		newHashes    []uint64
 		uniqueHashes = map[uint64]uint32{}
 	)
+	h.mtx.RLock()

 	for i := range samples {
 		s := &samples[i]

@@ -254,13 +278,16 @@ func (h *HeadBlock) appendBatch(samples []hashedSample) error {
 			s.ref = ref
 			continue
 		}
-		s.ref = uint32(len(h.descs) + len(newSeries))
+		s.ref = uint32(len(newSeries))
 		uniqueHashes[s.hash] = s.ref

 		newSeries = append(newSeries, s.labels)
 		newHashes = append(newHashes, s.hash)
+		newSamples = append(newSamples, s)
 	}

+	h.mtx.RUnlock()
+
 	// Write all new series and samples to the WAL and add it to the
 	// in-mem database on success.
 	if err := h.wal.Log(newSeries, samples); err != nil {

@@ -269,27 +296,55 @@ func (h *HeadBlock) appendBatch(samples []hashedSample) error {

 	// After the samples were successfully written to the WAL, there may
 	// be no further failures.
-	for i, s := range newSeries {
-		h.create(newHashes[i], s)
+	if len(newSeries) > 0 {
+		h.mtx.Lock()
+
+		base := len(h.descs)
+
+		for i, s := range newSeries {
+			h.create(newHashes[i], s)
+		}
+		for _, s := range newSamples {
+			s.ref = uint32(base) + s.ref
+		}
+
+		h.mtx.Unlock()
+
+		h.mtx.RLock()
+		defer h.mtx.RUnlock()
 	}

+	total := len(samples)
+
 	for _, s := range samples {
 		cd := h.descs[s.ref]
 		// Skip duplicate samples.
 		if cd.lastTimestamp == s.t && cd.lastValue != s.v {
+			total--
 			continue
 		}
 		cd.append(s.t, s.v)

-		if s.t > h.bstats.MaxTime {
-			h.bstats.MaxTime = s.t
+		if t := h.bstats.MaxTime; s.t > t {
+			// h.bstats.MaxTime = s.t
+			for !atomic.CompareAndSwapInt64(&h.bstats.MaxTime, t, s.t) {
+				if t = h.bstats.MaxTime; s.t <= t {
+					break
+				}
+			}
 		}
-		if s.t < h.bstats.MinTime {
-			h.bstats.MinTime = s.t
+		if t := h.bstats.MinTime; s.t < t {
+			// h.bstats.MinTime = s.t
+			for !atomic.CompareAndSwapInt64(&h.bstats.MinTime, t, s.t) {
+				if t = h.bstats.MinTime; s.t >= t {
+					break
+				}
+			}
 		}
-		h.bstats.SampleCount++
 	}

+	atomic.AddUint64(&h.bstats.SampleCount, uint64(total))
+
 	return nil
 }
```
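With appendBatch holding only the read lock in the common case, several goroutines can now update bstats concurrently, which is why the hunk above switches MinTime/MaxTime maintenance to compare-and-swap loops and the sample count to `atomic.AddUint64`. Here is a self-contained sketch of the CAS-maximum idiom (`observe` and `maxTime` are illustrative names, not part of the commit):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var maxTime int64 = -1 << 63 // running maximum, updated concurrently

// observe raises maxTime to t if t is larger, retrying on CAS failure.
// It gives up as soon as another goroutine has installed a value >= t.
func observe(t int64) {
	for {
		cur := atomic.LoadInt64(&maxTime)
		if t <= cur {
			return // stored maximum already covers this timestamp
		}
		if atomic.CompareAndSwapInt64(&maxTime, cur, t) {
			return
		}
	}
}

func main() {
	var wg sync.WaitGroup
	for i := int64(1); i <= 100; i++ {
		wg.Add(1)
		go func(t int64) { defer wg.Done(); observe(t) }(i)
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&maxTime)) // always prints 100
}
```

Each failed CAS means another goroutine changed the value first; the loop re-reads and bails out once the stored maximum already covers the new timestamp, so it terminates quickly even under contention. The diff reads the field directly rather than through `atomic.LoadInt64`, but the retry structure is the same.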
wal.go (11 changed lines):

```diff
@@ -7,6 +7,7 @@ import (
 	"math"
 	"os"
 	"path/filepath"
+	"sync"
 	"time"

 	"github.com/coreos/etcd/pkg/fileutil"

@@ -29,6 +30,8 @@ const (
 // WAL is a write ahead log for series data. It can only be written to.
 // Use WALReader to read back from a write ahead log.
 type WAL struct {
+	mtx sync.Mutex
+
 	f      *fileutil.LockedFile
 	enc    *walEncoder
 	logger log.Logger

@@ -108,6 +111,9 @@ func (w *WAL) ReadAll(h *walHandler) error {

 // Log writes a batch of new series labels and samples to the log.
 func (w *WAL) Log(series []labels.Labels, samples []hashedSample) error {
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+
 	if err := w.enc.encodeSeries(series); err != nil {
 		return err
 	}

@@ -142,9 +148,12 @@ func (w *WAL) run(interval time.Duration) {
 		case <-w.stopc:
 			return
 		case <-tick:
+			w.mtx.Lock()
+
 			if err := w.sync(); err != nil {
 				w.logger.Log("msg", "sync failed", "err", err)
 			}
+			w.mtx.Unlock()
 		}
 	}
 }

@@ -172,7 +181,7 @@ const (
 	// walPageBytes is the alignment for flushing records to the backing Writer.
 	// It should be a multiple of the minimum sector size so that WAL can safely
 	// distinguish between torn writes and ordinary data corruption.
-	walPageBytes = 8 * minSectorSize
+	walPageBytes = 32 * minSectorSize
 )

 func newWALEncoder(f *os.File) (*walEncoder, error) {
```
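A note on the wal.go side: once appends run under the DB's read lock, `WAL.Log` can be reached by multiple goroutines at once, and the periodic sync in `run` ticks on yet another goroutine. The new mutex serializes all of them against the shared file and encoder. A stripped-down sketch of that arrangement, assuming hypothetical `walSketch` names rather than the real tsdb API:

```go
package walsketch

import (
	"os"
	"sync"
	"time"
)

type walSketch struct {
	mtx sync.Mutex // serializes appends and fsync on the shared file
	f   *os.File
}

// Log appends a record; the mutex keeps concurrent callers from
// interleaving their writes.
func (w *walSketch) Log(rec []byte) error {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	_, err := w.f.Write(rec)
	return err
}

// run flushes the file at a fixed interval under the same mutex, so a
// sync never observes a half-written record.
func (w *walSketch) run(interval time.Duration, stopc <-chan struct{}) {
	tick := time.NewTicker(interval)
	defer tick.Stop()
	for {
		select {
		case <-stopc:
			return
		case <-tick.C:
			w.mtx.Lock()
			_ = w.f.Sync() // an error would be logged in the real code
			w.mtx.Unlock()
		}
	}
}
```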