mirror of https://github.com/prometheus/prometheus

Merge pull request #6565 from prometheus/fixes-for-2.15.2

Cut release 2.15.2; cherry-picked the Windows fix and support for indexes with unsorted postings.

Refs: release-2.15, v2.15.2
commit d9613e5c46

@@ -4,6 +4,7 @@ version: 2.1
 orbs:
   prometheus: prometheus/prometheus@0.3.0
   go: circleci/go@0.2.0
+  win: circleci/windows@2.3.0
 
 executors:
   # Whenever the Go version is updated here, .promu.yml
@@ -49,6 +50,13 @@ jobs:
         key: v1-npm-deps-{{ checksum "web/ui/react-app/yarn.lock" }}
         paths:
         - web/ui/react-app/node_modules
+  test_windows:
+    executor: win/default
+    working_directory: /go/src/github.com/prometheus/prometheus
+    steps:
+    - checkout
+    # TSDB is where the most risk is Windows wise, so only test there for now.
+    - run: go test ./tsdb/...
   fuzzit_regression:
     executor: fuzzit
     working_directory: /go/src/github.com/prometheus/prometheus
@@ -78,6 +86,10 @@ workflows:
         filters:
           tags:
             only: /.*/
+    - test_windows:
+        filters:
+          tags:
+            only: /.*/
     - fuzzit_regression:
         filters:
           tags:
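Taken together, the three CircleCI hunks above wire a minimal Windows job into CI: a new `win` orb, a `test_windows` job that checks out the repository and runs only `go test ./tsdb/...` (per the in-file comment, TSDB is where the Windows risk is), and a workflow entry with the same tag filter as the existing jobs.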
@@ -1,6 +1,11 @@
+## 2.15.2 / 2020-01-06
+
+* [BUGFIX] TSDB: Fixed support for TSDB blocks built with Prometheus before 2.1.0. #6564
+* [BUGFIX] TSDB: Fixed block compaction issues on Windows. #6547
+
 ## 2.15.1 / 2019-12-25
 
-* [BUGFIX] Fixed race on concurrent queries against same data. #6512
+* [BUGFIX] TSDB: Fixed race on concurrent queries against same data. #6512
 
 ## 2.15.0 / 2019-12-23
@@ -197,9 +197,11 @@ func TestCorruptedChunk(t *testing.T) {
 			testutil.Equals(t, test.openErr.Error(), err.Error())
 			return
 		}
+		defer func() { testutil.Ok(t, b.Close()) }()
 
 		querier, err := NewBlockQuerier(b, 0, 1)
 		testutil.Ok(t, err)
+		defer func() { testutil.Ok(t, querier.Close()) }()
 		set, err := querier.Select(labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
 		testutil.Ok(t, err)
 
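The added `defer` calls here (and the matching ones in the chunk and postings test hunks below) plug file-handle leaks rather than change what is tested. That matters chiefly on Windows, which this release targets per the changelog: a file with an open handle cannot be deleted there, so a leaked block or querier handle makes the test's temp-directory cleanup fail. A standalone sketch of the failure mode, illustrative only and not Prometheus code:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "fdleak")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	name := filepath.Join(dir, "data")
	f, err := os.Create(name)
	if err != nil {
		panic(err)
	}
	// On Windows this Remove fails with a sharing violation while f is
	// still open; on Linux it succeeds. Closing first works everywhere.
	fmt.Println("remove while open:", os.Remove(name))
	f.Close()
}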
@@ -265,16 +267,20 @@ func TestBlockSize(t *testing.T) {
 }
 
 func TestReadIndexFormatV1(t *testing.T) {
-	/* The block here was produced at commit
-	07ef80820ef1250db82f9544f3fcf7f0f63ccee0 with:
+	/* The block here was produced at the commit
+	706602daed1487f7849990678b4ece4599745905 used in 2.0.0 with:
 	db, _ := Open("v1db", nil, nil, nil)
 	app := db.Appender()
 	app.Add(labels.FromStrings("foo", "bar"), 1, 2)
 	app.Add(labels.FromStrings("foo", "baz"), 3, 4)
 	app.Add(labels.FromStrings("foo", "meh"), 1000*3600*4, 4) // Not in the block.
-	app.Commit()
-	db.compact()
-	db.Close()
+	// Make sure we've enough values for the lack of sorting of postings offsets to show up.
+	for i := 0; i < 100; i++ {
+		app.Add(labels.FromStrings("bar", strconv.FormatInt(int64(i), 10)), 0, 0)
+	}
+	app.Commit()
+	db.compact()
+	db.Close()
 	*/
 
 	blockDir := filepath.Join("testdata", "index_format_v1")
@@ -288,7 +294,7 @@ func TestReadIndexFormatV1(t *testing.T) {
 
 	q, err = NewBlockQuerier(block, 0, 1000)
 	testutil.Ok(t, err)
-	testutil.Equals(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.$")),
+	testutil.Equals(t, query(t, q, labels.MustNewMatcher(labels.MatchNotRegexp, "foo", "^.?$")),
 		map[string][]tsdbutil.Sample{
 			`{foo="bar"}`: []tsdbutil.Sample{sample{t: 1, v: 2}},
 			`{foo="baz"}`: []tsdbutil.Sample{sample{t: 3, v: 4}},
@@ -2655,6 +2655,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
 			// Check the content of the chunks.
 			r, err := chunks.NewDirReader(tempDir, nil)
 			testutil.Ok(t, err)
+			defer func() { testutil.Ok(t, r.Close()) }()
 
 			for _, chks := range test.chks {
 				for _, chkExp := range chks {
@@ -2705,4 +2706,5 @@ func TestChunkReader_ConcurrentReads(t *testing.T) {
 		}
 		wg.Wait()
 	}
+	testutil.Ok(t, r.Close())
 }
@@ -511,11 +511,11 @@ func (w *Writer) finishSymbols() error {
 		return err
 	}
 
-	var err error
-	w.symbolFile, err = fileutil.OpenMmapFile(w.f.name)
+	sf, err := fileutil.OpenMmapFile(w.f.name)
 	if err != nil {
 		return err
 	}
+	w.symbolFile = sf
 	hash := crc32.Checksum(w.symbolFile.Bytes()[w.toc.Symbols+4:hashPos], castagnoliTable)
 	w.buf1.Reset()
 	w.buf1.PutBE32(hash)
@@ -700,7 +700,11 @@ func (w *Writer) writePostingsOffsetTable() error {
 	if err != nil {
 		return err
 	}
-	defer f.Close()
+	defer func() {
+		if f != nil {
+			f.Close()
+		}
+	}()
 	d := encoding.NewDecbufRaw(realByteSlice(f.Bytes()), int(w.fPO.pos))
 	cnt := w.cntPO
 	for d.Err() == nil && cnt > 0 {
@@ -720,6 +724,10 @@ func (w *Writer) writePostingsOffsetTable() error {
 	}
 
 	// Cleanup temporary file.
+	if err := f.Close(); err != nil {
+		return err
+	}
+	f = nil
 	if err := w.fPO.close(); err != nil {
 		return err
 	}
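The two hunks above replace a bare `defer f.Close()` with a guarded defer plus an explicit, error-checked close before the temporary file is cleaned up: the defer now fires only when an early error return left the file open, while the happy path surfaces the close error and disarms the defer by nilling `f`. A condensed sketch of the pattern, with a hypothetical helper name:

package sketch

import "os"

// processTempFile is illustrative only. The deferred close covers early
// error returns; the explicit close keeps its error, and f = nil makes
// the defer a no-op afterwards.
func processTempFile(name string) error {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	defer func() {
		if f != nil {
			f.Close() // Best effort; we are already on an error path.
		}
	}()

	// ... read and process f; any early return above still closes it ...

	if err := f.Close(); err != nil {
		return err
	}
	f = nil                // Disarm the defer: the file is already closed.
	return os.Remove(name) // On Windows this only succeeds once closed.
}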
@@ -962,9 +970,9 @@ type labelIndexHashEntry struct {
 }
 
 func (w *Writer) Close() error {
-	if err := w.ensureStage(idxStageDone); err != nil {
-		return err
-	}
+	// Even if this fails, we need to close all the files.
+	ensureErr := w.ensureStage(idxStageDone)
+
 	if w.symbolFile != nil {
 		if err := w.symbolFile.Close(); err != nil {
 			return err
@@ -980,7 +988,10 @@ func (w *Writer) Close() error {
 			return err
 		}
 	}
-	return w.f.close()
+	if err := w.f.close(); err != nil {
+		return err
+	}
+	return ensureErr
 }
 
 // StringTuples provides access to a sorted list of string tuples.
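The rewritten `Close` no longer returns early when `ensureStage` fails; it records that error, closes every file regardless, and reports the recorded error only after cleanup (a close error still wins). The ordering, reduced to a generic sketch with a hypothetical `closeAll`:

package sketch

import "io"

// closeAll mirrors the new ordering: run the fallible finalization first,
// then close all resources even if it failed. A close error takes
// precedence; otherwise the finalization error (possibly nil) is returned.
func closeAll(finalize func() error, closers ...io.Closer) error {
	finalizeErr := finalize() // Even if this fails, close all the files.
	for _, c := range closers {
		if err := c.Close(); err != nil {
			return err
		}
	}
	return finalizeErr
}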
@@ -1013,6 +1024,8 @@ type Reader struct {
 	// Map of LabelName to a list of some LabelValues's position in the offset table.
 	// The first and last values for each name are always present.
 	postings map[string][]postingOffset
+	// For the v1 format, labelname -> labelvalue -> offset.
+	postingsV1 map[string]map[string]uint64
 
 	symbols     *Symbols
 	nameSymbols map[uint32]string // Cache of the label name symbol lookups,
@@ -1102,45 +1115,64 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) {
 		return nil, errors.Wrap(err, "read symbols")
 	}
 
-	var lastKey []string
-	lastOff := 0
-	valueCount := 0
-	// For the postings offset table we keep every label name but only every nth
-	// label value (plus the first and last one), to save memory.
-	if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error {
-		if len(key) != 2 {
-			return errors.Errorf("unexpected key length for posting table %d", len(key))
-		}
-		if _, ok := r.postings[key[0]]; !ok {
-			// Next label name.
-			r.postings[key[0]] = []postingOffset{}
-			if lastKey != nil {
-				// Always include last value for each label name.
-				r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
-			}
-			lastKey = nil
-			valueCount = 0
-		}
-		if valueCount%32 == 0 {
-			r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off})
-			lastKey = nil
-		} else {
-			lastKey = key
-			lastOff = off
-		}
-		valueCount++
-		return nil
-	}); err != nil {
-		return nil, errors.Wrap(err, "read postings table")
-	}
-	if lastKey != nil {
-		r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
-	}
-	// Trim any extra space in the slices.
-	for k, v := range r.postings {
-		l := make([]postingOffset, len(v))
-		copy(l, v)
-		r.postings[k] = l
+	if r.version == FormatV1 {
+		// Earlier V1 formats don't have a sorted postings offset table, so
+		// load the whole offset table into memory.
+		r.postingsV1 = map[string]map[string]uint64{}
+		if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, off uint64, _ int) error {
+			if len(key) != 2 {
+				return errors.Errorf("unexpected key length for posting table %d", len(key))
+			}
+			if _, ok := r.postingsV1[key[0]]; !ok {
+				r.postingsV1[key[0]] = map[string]uint64{}
+				r.postings[key[0]] = nil // Used to get a list of labelnames in places.
+			}
+			r.postingsV1[key[0]][key[1]] = off
+			return nil
+		}); err != nil {
+			return nil, errors.Wrap(err, "read postings table")
+		}
+	} else {
+		var lastKey []string
+		lastOff := 0
+		valueCount := 0
+		// For the postings offset table we keep every label name but only every nth
+		// label value (plus the first and last one), to save memory.
+		if err := ReadOffsetTable(r.b, r.toc.PostingsTable, func(key []string, _ uint64, off int) error {
+			if len(key) != 2 {
+				return errors.Errorf("unexpected key length for posting table %d", len(key))
+			}
+			if _, ok := r.postings[key[0]]; !ok {
+				// Next label name.
+				r.postings[key[0]] = []postingOffset{}
+				if lastKey != nil {
+					// Always include last value for each label name.
+					r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
+				}
+				lastKey = nil
+				valueCount = 0
+			}
+			if valueCount%32 == 0 {
+				r.postings[key[0]] = append(r.postings[key[0]], postingOffset{value: key[1], off: off})
+				lastKey = nil
+			} else {
+				lastKey = key
+				lastOff = off
+			}
+			valueCount++
+			return nil
+		}); err != nil {
+			return nil, errors.Wrap(err, "read postings table")
+		}
+		if lastKey != nil {
+			r.postings[lastKey[0]] = append(r.postings[lastKey[0]], postingOffset{value: lastKey[1], off: lastOff})
+		}
+		// Trim any extra space in the slices.
+		for k, v := range r.postings {
+			l := make([]postingOffset, len(v))
+			copy(l, v)
+			r.postings[k] = l
+		}
 	}
 
 	r.nameSymbols = make(map[uint32]string, len(r.postings))
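This hunk splits index reading by format. For V1 indexes, whose postings offset table is not guaranteed sorted (the bug behind #6564), the whole table is loaded into the nested `postingsV1` map. For newer indexes the table is sorted, so only a sparse sample is kept in memory: every 32nd value per label name plus the last one, letting a lookup binary-search the sample and then scan forward a bounded number of entries in the file. A toy sketch of that sampling, with hypothetical names and assuming values arrive sorted as the on-disk table guarantees:

package sketch

type postingOffset struct {
	value string
	off   int
}

// sampleOffsets keeps every 32nd (value, offset) pair plus the final one,
// the same density the reader above uses for its in-memory index.
func sampleOffsets(values []string, offs []int) []postingOffset {
	const nth = 32
	var out []postingOffset
	for i, v := range values {
		if i%nth == 0 || i == len(values)-1 {
			out = append(out, postingOffset{value: v, off: offs[i]})
		}
	}
	return out
}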
@@ -1397,6 +1429,19 @@ func (r *Reader) LabelValues(names ...string) (StringTuples, error) {
 	if len(names) != 1 {
 		return nil, errors.Errorf("only one label name supported")
 	}
+	if r.version == FormatV1 {
+		e, ok := r.postingsV1[names[0]]
+		if !ok {
+			return emptyStringTuples{}, nil
+		}
+		values := make([]string, 0, len(e))
+		for k := range e {
+			values = append(values, k)
+		}
+		sort.Strings(values)
+		return NewStringTuples(values, 1)
+
+	}
 	e, ok := r.postings[names[0]]
 	if !ok {
 		return emptyStringTuples{}, nil
@@ -1456,6 +1501,28 @@ func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error {
 }
 
 func (r *Reader) Postings(name string, values ...string) (Postings, error) {
+	if r.version == FormatV1 {
+		e, ok := r.postingsV1[name]
+		if !ok {
+			return EmptyPostings(), nil
+		}
+		res := make([]Postings, 0, len(values))
+		for _, v := range values {
+			postingsOff, ok := e[v]
+			if !ok {
+				continue
+			}
+			// Read from the postings table.
+			d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable)
+			_, p, err := r.dec.Postings(d.Get())
+			if err != nil {
+				return nil, errors.Wrap(err, "decode postings")
+			}
+			res = append(res, p)
+		}
+		return Merge(res...), nil
+	}
+
 	e, ok := r.postings[name]
 	if !ok {
 		return EmptyPostings(), nil
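For V1, `Postings` thus resolves each requested value with a direct map lookup and decodes the postings list at its stored offset, making no ordering assumption about the offset table at all, which is exactly the assumption pre-2.1.0 blocks violated. Newer indexes keep taking the sampled binary-search path below this branch.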
@@ -280,6 +280,7 @@ func TestPostingsMany(t *testing.T) {
 
 	ir, err := NewFileReader(fn)
 	testutil.Ok(t, err)
+	defer func() { testutil.Ok(t, ir.Close()) }()
 
 	cases := []struct {
 		in []string
Binary file not shown.
Binary file not shown.
@@ -1,17 +1,17 @@
 {
 	"version": 1,
-	"ulid": "01DVZX4CHY2EGZ6JQVS80AB9CF",
+	"ulid": "01DXXFZDYD1MQW6079WK0K6EDQ",
 	"minTime": 0,
 	"maxTime": 7200000,
 	"stats": {
-		"numSamples": 2,
-		"numSeries": 2,
-		"numChunks": 2
+		"numSamples": 102,
+		"numSeries": 102,
+		"numChunks": 102
 	},
 	"compaction": {
 		"level": 1,
 		"sources": [
-			"01DVZX4CHY2EGZ6JQVS80AB9CF"
+			"01DXXFZDYD1MQW6079WK0K6EDQ"
 		]
 	}
 }
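The regenerated meta.json (and the two binary testdata files above) reflect the rebuilt V1 test block: the original two in-range series (`foo="bar"`, `foo="baz"`; `foo="meh"` falls outside the block) plus the 100 extra `bar=<i>` series added in the loop shown earlier give 102 series, each with one sample and one chunk, under a fresh ULID.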
@@ -427,6 +427,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
 			}
 		}
 	}
+	testutil.Ok(t, w.Close())
 
 	// At this point we should have at least 6 segments, lets create a checkpoint dir of the first 5.
 	checkpointDir := dir + "/wal/checkpoint.000004"