2018-05-17 13:02:47 +00:00
|
|
|
// Copyright 2018 The Prometheus Authors
|
|
|
|
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2022-10-10 15:08:46 +00:00
|
|
|
package wlog
|
2018-05-17 13:02:47 +00:00
|
|
|
|
|
|
|
import (
|
2019-01-07 08:43:33 +00:00
|
|
|
"fmt"
|
2018-05-17 13:02:47 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2022-07-19 08:58:52 +00:00
|
|
|
"sort"
|
2019-01-07 08:43:33 +00:00
|
|
|
"strings"
|
2018-05-17 13:02:47 +00:00
|
|
|
"testing"
|
|
|
|
|
2021-06-11 16:17:59 +00:00
|
|
|
"github.com/go-kit/log"
|
2019-01-07 08:43:33 +00:00
|
|
|
"github.com/pkg/errors"
|
2020-10-29 09:43:23 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2020-10-22 09:00:08 +00:00
|
|
|
|
2022-08-29 12:08:36 +00:00
|
|
|
"github.com/prometheus/prometheus/model/histogram"
|
2021-11-08 14:23:17 +00:00
|
|
|
"github.com/prometheus/prometheus/model/labels"
|
2021-11-06 10:10:04 +00:00
|
|
|
"github.com/prometheus/prometheus/tsdb/chunks"
|
2019-09-19 09:15:41 +00:00
|
|
|
"github.com/prometheus/prometheus/tsdb/record"
|
2018-05-17 13:02:47 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestLastCheckpoint(t *testing.T) {
|
2021-11-01 06:58:18 +00:00
|
|
|
dir := t.TempDir()
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2021-11-01 06:58:18 +00:00
|
|
|
_, _, err := LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.Equal(t, record.ErrNotFound, err)
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.0000"), 0o777))
|
2019-01-02 16:48:42 +00:00
|
|
|
s, k, err := LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
|
|
|
|
require.Equal(t, 0, k)
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.xyz"), 0o777))
|
2018-05-17 13:02:47 +00:00
|
|
|
s, k, err = LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, filepath.Join(dir, "checkpoint.0000"), s)
|
|
|
|
require.Equal(t, 0, k)
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1"), 0o777))
|
2018-05-17 13:02:47 +00:00
|
|
|
s, k, err = LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, filepath.Join(dir, "checkpoint.1"), s)
|
|
|
|
require.Equal(t, 1, k)
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.1000"), 0o777))
|
2018-05-17 13:02:47 +00:00
|
|
|
s, k, err = LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, filepath.Join(dir, "checkpoint.1000"), s)
|
|
|
|
require.Equal(t, 1000, k)
|
2020-03-18 15:10:41 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777))
|
2020-03-18 15:10:41 +00:00
|
|
|
s, k, err = LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, filepath.Join(dir, "checkpoint.99999999"), s)
|
|
|
|
require.Equal(t, 99999999, k)
|
2020-03-18 15:10:41 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777))
|
2020-03-18 15:10:41 +00:00
|
|
|
s, k, err = LastCheckpoint(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, filepath.Join(dir, "checkpoint.100000000"), s)
|
|
|
|
require.Equal(t, 100000000, k)
|
2018-05-17 13:02:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDeleteCheckpoints(t *testing.T) {
|
2021-11-01 06:58:18 +00:00
|
|
|
dir := t.TempDir()
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, DeleteCheckpoints(dir, 0))
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.00"), 0o777))
|
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.01"), 0o777))
|
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.02"), 0o777))
|
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.03"), 0o777))
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, DeleteCheckpoints(dir, 2))
|
2018-05-17 13:02:47 +00:00
|
|
|
|
2022-04-27 09:24:36 +00:00
|
|
|
files, err := os.ReadDir(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-06 13:34:20 +00:00
|
|
|
fns := []string{}
|
|
|
|
for _, f := range files {
|
|
|
|
fns = append(fns, f.Name())
|
|
|
|
}
|
2020-10-29 09:43:23 +00:00
|
|
|
require.Equal(t, []string{"checkpoint.02", "checkpoint.03"}, fns)
|
2020-03-18 15:10:41 +00:00
|
|
|
|
2021-10-22 08:06:44 +00:00
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.99999999"), 0o777))
|
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000000"), 0o777))
|
|
|
|
require.NoError(t, os.MkdirAll(filepath.Join(dir, "checkpoint.100000001"), 0o777))
|
2020-03-18 15:10:41 +00:00
|
|
|
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, DeleteCheckpoints(dir, 100000000))
|
2020-03-18 15:10:41 +00:00
|
|
|
|
2022-04-27 09:24:36 +00:00
|
|
|
files, err = os.ReadDir(dir)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-06 13:34:20 +00:00
|
|
|
fns = []string{}
|
|
|
|
for _, f := range files {
|
|
|
|
fns = append(fns, f.Name())
|
|
|
|
}
|
2020-10-29 09:43:23 +00:00
|
|
|
require.Equal(t, []string{"checkpoint.100000000", "checkpoint.100000001"}, fns)
|
2018-05-17 13:02:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestCheckpoint writes series, samples, histograms, exemplars, and metadata
// records to a WAL, checkpoints segments 100-106 while dropping odd series
// refs and samples older than last/2, and then replays the checkpoint to
// verify exactly the expected records survived. The whole scenario runs once
// per supported compression type.
func TestCheckpoint(t *testing.T) {
	// makeHistogram builds a small, i-dependent histogram so that every
	// logged histogram sample record has distinct content.
	makeHistogram := func(i int) *histogram.Histogram {
		return &histogram.Histogram{
			Count:         5 + uint64(i*4),
			ZeroCount:     2 + uint64(i),
			ZeroThreshold: 0.001,
			Sum:           18.4 * float64(i+1),
			Schema:        1,
			PositiveSpans: []histogram.Span{
				{Offset: 0, Length: 2},
				{Offset: 1, Length: 2},
			},
			PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
		}
	}

	for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} {
		t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
			dir := t.TempDir()

			var enc record.Encoder
			// Create a dummy segment to bump the initial number.
			seg, err := CreateSegment(dir, 100)
			require.NoError(t, err)
			require.NoError(t, seg.Close())

			// Manually create checkpoint for 99 and earlier.
			w, err := New(nil, nil, filepath.Join(dir, "checkpoint.0099"), compress)
			require.NoError(t, err)

			// Add some data we expect to be around later.
			err = w.Log(enc.Series([]record.RefSeries{
				{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
				{Ref: 1, Labels: labels.FromStrings("a", "b", "c", "1")},
			}, nil))
			require.NoError(t, err)
			// Log an unknown record, that might have come from a future Prometheus version.
			require.NoError(t, w.Log([]byte{255}))
			require.NoError(t, w.Close())

			// Start a WAL and write records to it as usual.
			w, err = NewSize(nil, nil, dir, 64*1024, compress)
			require.NoError(t, err)

			// Counters for how many sample/histogram entries were logged, so
			// the retained fraction can be checked after checkpointing.
			samplesInWAL, histogramsInWAL := 0, 0
			var last int64
			for i := 0; ; i++ {
				// Keep writing until the WAL reaches segment 106.
				_, n, err := Segments(w.Dir())
				require.NoError(t, err)
				if n >= 106 {
					break
				}
				// Write some series initially.
				if i == 0 {
					b := enc.Series([]record.RefSeries{
						{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
						{Ref: 3, Labels: labels.FromStrings("a", "b", "c", "3")},
						{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
						{Ref: 5, Labels: labels.FromStrings("a", "b", "c", "5")},
					}, nil)
					require.NoError(t, w.Log(b))

					// Initial metadata for the series logged above. Series 4
					// never receives an update below, so this "unit"/"help"
					// version is the one expected in the checkpoint.
					b = enc.Metadata([]record.RefMetadata{
						{Ref: 2, Unit: "unit", Help: "help"},
						{Ref: 3, Unit: "unit", Help: "help"},
						{Ref: 4, Unit: "unit", Help: "help"},
						{Ref: 5, Unit: "unit", Help: "help"},
					}, nil)
					require.NoError(t, w.Log(b))
				}
				// Write samples until the WAL has enough segments.
				// Make them have drifting timestamps within a record to see that they
				// get filtered properly.
				b := enc.Samples([]record.RefSample{
					{Ref: 0, T: last, V: float64(i)},
					{Ref: 1, T: last + 10000, V: float64(i)},
					{Ref: 2, T: last + 20000, V: float64(i)},
					{Ref: 3, T: last + 30000, V: float64(i)},
				}, nil)
				require.NoError(t, w.Log(b))
				samplesInWAL += 4
				// Same drifting-timestamp pattern for histogram samples.
				h := makeHistogram(i)
				b = enc.HistogramSamples([]record.RefHistogramSample{
					{Ref: 0, T: last, H: h},
					{Ref: 1, T: last + 10000, H: h},
					{Ref: 2, T: last + 20000, H: h},
					{Ref: 3, T: last + 30000, H: h},
				}, nil)
				require.NoError(t, w.Log(b))
				histogramsInWAL += 4

				// One exemplar per iteration; exemplars before last/2 must be
				// dropped by the checkpoint as well.
				b = enc.Exemplars([]record.RefExemplar{
					{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i))},
				}, nil)
				require.NoError(t, w.Log(b))

				// Write changing metadata for each series. In the end, only the latest
				// version should end up in the checkpoint.
				b = enc.Metadata([]record.RefMetadata{
					{Ref: 0, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
					{Ref: 1, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
					{Ref: 2, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
					{Ref: 3, Unit: fmt.Sprintf("%d", last), Help: fmt.Sprintf("%d", last)},
				}, nil)
				require.NoError(t, w.Log(b))

				last += 100
			}
			require.NoError(t, w.Close())

			// Checkpoint segments [100, 106], keeping only even series refs
			// and samples at or after last/2.
			_, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
				return x%2 == 0
			}, last/2)
			require.NoError(t, err)
			require.NoError(t, w.Truncate(107))
			require.NoError(t, DeleteCheckpoints(w.Dir(), 106))

			// Only the new checkpoint should be left.
			files, err := os.ReadDir(dir)
			require.NoError(t, err)
			require.Equal(t, 1, len(files))
			require.Equal(t, "checkpoint.00000106", files[0].Name())

			// Replay the checkpoint and verify its contents.
			sr, err := NewSegmentsReader(filepath.Join(dir, "checkpoint.00000106"))
			require.NoError(t, err)
			defer sr.Close()

			var dec record.Decoder
			var series []record.RefSeries
			var metadata []record.RefMetadata
			r := NewReader(sr)

			samplesInCheckpoint, histogramsInCheckpoint := 0, 0
			for r.Next() {
				rec := r.Record()

				switch dec.Type(rec) {
				case record.Series:
					series, err = dec.Series(rec, series)
					require.NoError(t, err)
				case record.Samples:
					// Every retained sample must be at or after the mint
					// (last/2) passed to Checkpoint.
					samples, err := dec.Samples(rec, nil)
					require.NoError(t, err)
					for _, s := range samples {
						require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
					}
					samplesInCheckpoint += len(samples)
				case record.HistogramSamples:
					histograms, err := dec.HistogramSamples(rec, nil)
					require.NoError(t, err)
					for _, h := range histograms {
						require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
					}
					histogramsInCheckpoint += len(histograms)
				case record.Exemplars:
					exemplars, err := dec.Exemplars(rec, nil)
					require.NoError(t, err)
					for _, e := range exemplars {
						require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
					}
				case record.Metadata:
					metadata, err = dec.Metadata(rec, metadata)
					require.NoError(t, err)
				}
			}
			require.NoError(t, r.Err())
			// Making sure we replayed some samples. We expect >50% samples to be still present.
			require.Greater(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.5)
			require.Less(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.8)
			require.Greater(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.5)
			require.Less(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.8)

			// Only the even-ref series (0, 2, 4) survive the keep function.
			expectedRefSeries := []record.RefSeries{
				{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
				{Ref: 2, Labels: labels.FromStrings("a", "b", "c", "2")},
				{Ref: 4, Labels: labels.FromStrings("a", "b", "c", "4")},
			}
			require.Equal(t, expectedRefSeries, series)

			// Refs 0 and 2 were updated every iteration, so the last written
			// version (unit/help == last-100) wins; ref 4 only ever had the
			// initial "unit"/"help" metadata.
			expectedRefMetadata := []record.RefMetadata{
				{Ref: 0, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
				{Ref: 2, Unit: fmt.Sprintf("%d", last-100), Help: fmt.Sprintf("%d", last-100)},
				{Ref: 4, Unit: "unit", Help: "help"},
			}
			// Metadata record order is not guaranteed; sort by ref for a
			// deterministic comparison.
			sort.Slice(metadata, func(i, j int) bool { return metadata[i].Ref < metadata[j].Ref })
			require.Equal(t, expectedRefMetadata, metadata)
		})
	}
}
|
2019-01-07 08:43:33 +00:00
|
|
|
|
|
|
|
func TestCheckpointNoTmpFolderAfterError(t *testing.T) {
|
2022-10-10 15:08:46 +00:00
|
|
|
// Create a new wlog with invalid data.
|
2021-11-01 06:58:18 +00:00
|
|
|
dir := t.TempDir()
|
2023-07-11 12:57:57 +00:00
|
|
|
w, err := NewSize(nil, nil, dir, 64*1024, CompressionNone)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
2020-10-05 09:09:59 +00:00
|
|
|
var enc record.Encoder
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, w.Log(enc.Series([]record.RefSeries{
|
2021-10-22 08:06:44 +00:00
|
|
|
{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "2")},
|
|
|
|
}, nil)))
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, w.Close())
|
2019-01-07 08:43:33 +00:00
|
|
|
|
2020-10-05 09:09:59 +00:00
|
|
|
// Corrupt data.
|
2021-10-22 08:06:44 +00:00
|
|
|
f, err := os.OpenFile(filepath.Join(w.Dir(), "00000000"), os.O_WRONLY, 0o666)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
2020-10-05 09:09:59 +00:00
|
|
|
_, err = f.WriteAt([]byte{42}, 1)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.NoError(t, f.Close())
|
2020-10-05 09:09:59 +00:00
|
|
|
|
2022-10-10 15:08:46 +00:00
|
|
|
// Run the checkpoint and since the wlog contains corrupt data this should return an error.
|
2020-07-15 13:45:37 +00:00
|
|
|
_, err = Checkpoint(log.NewNopLogger(), w, 0, 1, nil, 0)
|
2020-10-29 09:43:23 +00:00
|
|
|
require.Error(t, err)
|
2019-01-07 08:43:33 +00:00
|
|
|
|
2022-10-10 15:08:46 +00:00
|
|
|
// Walk the wlog dir to make sure there are no tmp folder left behind after the error.
|
2019-01-07 08:43:33 +00:00
|
|
|
err = filepath.Walk(w.Dir(), func(path string, info os.FileInfo, err error) error {
|
|
|
|
if err != nil {
|
2022-03-03 16:21:05 +00:00
|
|
|
return errors.Wrapf(err, "access err %q: %v", path, err)
|
2019-01-07 08:43:33 +00:00
|
|
|
}
|
|
|
|
if info.IsDir() && strings.HasSuffix(info.Name(), ".tmp") {
|
2022-10-10 15:08:46 +00:00
|
|
|
return fmt.Errorf("wlog dir contains temporary folder:%s", info.Name())
|
2019-01-07 08:43:33 +00:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
2020-10-29 09:43:23 +00:00
|
|
|
require.NoError(t, err)
|
2019-01-07 08:43:33 +00:00
|
|
|
}
|