Merge pull request #14691 from aknuds1/arve/upgrade-golangci-lint

Upgrade golangci-lint to v1.60.1
Arve Knudsen 3 months ago committed by GitHub
commit b914a9b580

@@ -186,7 +186,7 @@ jobs:
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.59.1
+          version: v1.60.1
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.1
+GOLANGCI_LINT_VERSION ?= v1.60.1
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) {
     fmt.Println()
 }
-func generateBucket(min, max int) (start, end, step int) {
-    s := (max - min) / 10
+func generateBucket(minVal, maxVal int) (start, end, step int) {
+    s := (maxVal - minVal) / 10
     step = 10
     for step < s && step <= 10000 {
         step *= 10
     }
-    start = min - min%step
-    end = max - max%step + step
+    start = minVal - minVal%step
+    end = maxVal - maxVal%step + step
     return
 }
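Most of the Go changes in this commit are mechanical renames of variables called min, max, or clear. Since Go 1.21 these are predeclared identifiers, and the stricter checks that come with golangci-lint v1.60 report code that shadows them (presumably the predeclared check or a similar rule; the exact linter is an assumption here). A minimal illustration:

```go
package main

import "fmt"

func main() {
	// Since Go 1.21, min and max are predeclared functions.
	fmt.Println(min(3, 7), max(3, 7)) // 3 7

	// Shadowing them is legal, but within this scope the builtin
	// max is no longer reachable; this is exactly the pattern the
	// renames in this diff remove.
	max := 10
	fmt.Println(max) // 10
}
```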

@@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
         resp.Body.Close()
     }()
-    //nolint:usestdlibvars
     if resp.StatusCode/100 != 2 {
         return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
     }

@@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
         resp.Body.Close()
     }()
-    //nolint:usestdlibvars
     if resp.StatusCode/100 != 2 {
         return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
     }
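This hunk and the matching Eureka one above drop //nolint:usestdlibvars directives rather than rewrite the checks. The likely mechanics, stated as an assumption: newer usestdlibvars no longer flags the StatusCode/100 pattern (there is no stdlib constant for a whole status class), so with v1.60 nolintlint reports the directives as unused. A sketch of what the linter does and does not target:

```go
package main

import (
	"fmt"
	"net/http"
)

func check(resp *http.Response) error {
	// usestdlibvars flags literal comparisons such as
	//     resp.StatusCode == 200
	// and suggests http.StatusOK. A class check like the one below
	// has no stdlib constant to substitute, so there is nothing to
	// report and an inline suppression becomes dead weight.
	if resp.StatusCode/100 != 2 {
		return fmt.Errorf("non-2xx status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(check(&http.Response{StatusCode: http.StatusServiceUnavailable}))
}
```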

@@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
 // readResultWithTimeout reads all targetgroups from channel with timeout.
 // It merges targetgroups by source and sends the result to result channel.
-func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
+func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
     res := make(map[string]*targetgroup.Group)
     timeout := time.After(stopAfter)
 Loop:
@@ -167,7 +167,7 @@ Loop:
                 }
                 res[tg.Source] = tg
             }
-            if len(res) == max {
+            if len(res) == maxGroups {
                 // Reached max target groups we may get, break fast.
                 break Loop
             }
@@ -175,10 +175,10 @@ Loop:
             // Because we use queue, an object that is created then
             // deleted or updated may be processed only once.
             // So possibly we may skip events, timed out here.
-            t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max)
+            t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups)
             break Loop
         case <-ctx.Done():
-            t.Logf("stopped, got %d (max: %d) items", len(res), max)
+            t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups)
             break Loop
         }
     }

@@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
     }()
     // Any HTTP status 2xx is OK.
-    //nolint:usestdlibvars
     if resp.StatusCode/100 != 2 {
         return fmt.Errorf("bad response status %s", resp.Status)
     }

@@ -467,15 +467,15 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval
 // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) ===
 func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
     vec := vals[0].(Vector)
-    min := vals[1].(Vector)[0].F
-    max := vals[2].(Vector)[0].F
-    if max < min {
+    minVal := vals[1].(Vector)[0].F
+    maxVal := vals[2].(Vector)[0].F
+    if maxVal < minVal {
         return enh.Out, nil
     }
     for _, el := range vec {
         enh.Out = append(enh.Out, Sample{
             Metric: el.Metric.DropMetricName(),
-            F:      math.Max(min, math.Min(max, el.F)),
+            F:      math.Max(minVal, math.Min(maxVal, el.F)),
         })
     }
     return enh.Out, nil
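Only the locals are renamed; funcClamp's semantics are unchanged. For reference, a scalar sketch of the per-sample logic (the empty result for inverted bounds matches the documented PromQL behavior of clamp()):

```go
package main

import (
	"fmt"
	"math"
)

// clampScalar mirrors funcClamp for a single value: force v into
// [lo, hi], or produce nothing at all when the bounds are inverted.
func clampScalar(v, lo, hi float64) (float64, bool) {
	if hi < lo {
		return 0, false // PromQL returns an empty vector in this case
	}
	return math.Max(lo, math.Min(hi, v)), true
}

func main() {
	fmt.Println(clampScalar(12, 0, 10)) // 10 true
	fmt.Println(clampScalar(-3, 0, 10)) // 0 true
	fmt.Println(clampScalar(5, 10, 0))  // 0 false
}
```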
@@ -484,11 +484,11 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
 // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) ===
 func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
     vec := vals[0].(Vector)
-    max := vals[1].(Vector)[0].F
+    maxVal := vals[1].(Vector)[0].F
     for _, el := range vec {
         enh.Out = append(enh.Out, Sample{
             Metric: el.Metric.DropMetricName(),
-            F:      math.Min(max, el.F),
+            F:      math.Min(maxVal, el.F),
         })
     }
     return enh.Out, nil
@@ -497,11 +497,11 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel
 // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) ===
 func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
     vec := vals[0].(Vector)
-    min := vals[1].(Vector)[0].F
+    minVal := vals[1].(Vector)[0].F
     for _, el := range vec {
         enh.Out = append(enh.Out, Sample{
             Metric: el.Metric.DropMetricName(),
-            F:      math.Max(min, el.F),
+            F:      math.Max(minVal, el.F),
         })
     }
     return enh.Out, nil
@@ -700,13 +700,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
         return enh.Out, nil
     }
     return aggrOverTime(vals, enh, func(s Series) float64 {
-        max := s.Floats[0].F
+        maxVal := s.Floats[0].F
         for _, f := range s.Floats {
-            if f.F > max || math.IsNaN(max) {
-                max = f.F
+            if f.F > maxVal || math.IsNaN(maxVal) {
+                maxVal = f.F
             }
         }
-        return max
+        return maxVal
     }), nil
 }
@@ -720,13 +720,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
         return enh.Out, nil
     }
     return aggrOverTime(vals, enh, func(s Series) float64 {
-        min := s.Floats[0].F
+        minVal := s.Floats[0].F
         for _, f := range s.Floats {
-            if f.F < min || math.IsNaN(min) {
-                min = f.F
+            if f.F < minVal || math.IsNaN(minVal) {
+                minVal = f.F
             }
         }
-        return min
+        return minVal
     }), nil
 }

@@ -727,23 +727,23 @@ func lexValueSequence(l *Lexer) stateFn {
 // was only modified to integrate with our lexer.
 func lexEscape(l *Lexer) stateFn {
     var n int
-    var base, max uint32
+    var base, maxVal uint32
     ch := l.next()
     switch ch {
     case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen:
         return lexString
     case '0', '1', '2', '3', '4', '5', '6', '7':
-        n, base, max = 3, 8, 255
+        n, base, maxVal = 3, 8, 255
     case 'x':
         ch = l.next()
-        n, base, max = 2, 16, 255
+        n, base, maxVal = 2, 16, 255
     case 'u':
         ch = l.next()
-        n, base, max = 4, 16, unicode.MaxRune
+        n, base, maxVal = 4, 16, unicode.MaxRune
     case 'U':
         ch = l.next()
-        n, base, max = 8, 16, unicode.MaxRune
+        n, base, maxVal = 8, 16, unicode.MaxRune
     case eof:
         l.errorf("escape sequence not terminated")
         return lexString
@@ -772,7 +772,7 @@ func lexEscape(l *Lexer) stateFn {
         }
     }
-    if x > max || 0xD800 <= x && x < 0xE000 {
+    if x > maxVal || 0xD800 <= x && x < 0xE000 {
         l.errorf("escape sequence is an invalid Unicode code point")
     }
     return lexString
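The renamed maxVal keeps its original role: 255 bounds the octal and \x byte escapes, unicode.MaxRune bounds \u and \U, and the final check also rejects the UTF-16 surrogate range 0xD800 to 0xDFFF, which is not valid in any code point. For context:

```go
package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

func main() {
	fmt.Println(unicode.MaxRune == '\U0010FFFF') // true: ceiling for \u and \U escapes
	fmt.Println(utf8.ValidRune(0xD800))          // false: surrogate half
	fmt.Println(utf8.ValidRune(0x1F600))         // true: ordinary code point
}
```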

@@ -36,4 +36,4 @@ jobs:
         uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
         with:
           args: --verbose
-          version: v1.59.1
+          version: v1.60.1

@@ -96,10 +96,10 @@ func TestSampleRingMixed(t *testing.T) {
     // With ValNone as the preferred type, nothing should be initialized.
     r := newSampleRing(10, 2, chunkenc.ValNone)
-    require.Zero(t, len(r.fBuf))
-    require.Zero(t, len(r.hBuf))
-    require.Zero(t, len(r.fhBuf))
-    require.Zero(t, len(r.iBuf))
+    require.Empty(t, r.fBuf)
+    require.Empty(t, r.hBuf)
+    require.Empty(t, r.fhBuf)
+    require.Empty(t, r.iBuf)
     // But then mixed adds should work as expected.
     r.addF(fSample{t: 1, f: 3.14})
@@ -146,10 +146,10 @@ func TestSampleRingAtFloatHistogram(t *testing.T) {
     // With ValNone as the preferred type, nothing should be initialized.
     r := newSampleRing(10, 2, chunkenc.ValNone)
-    require.Zero(t, len(r.fBuf))
-    require.Zero(t, len(r.hBuf))
-    require.Zero(t, len(r.fhBuf))
-    require.Zero(t, len(r.iBuf))
+    require.Empty(t, r.fBuf)
+    require.Empty(t, r.hBuf)
+    require.Empty(t, r.fhBuf)
+    require.Empty(t, r.iBuf)
     var (
         h *histogram.Histogram
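These assertion rewrites look cosmetic but are presumably driven by the newer testifylint bundled with v1.60: require.Zero(t, len(x)) tests the same condition as require.Empty(t, x), while the latter states the intent and prints the offending elements on failure. A hypothetical side-by-side:

```go
package buffer_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptyVsZeroLen(t *testing.T) {
	var buf []float64
	require.Zero(t, len(buf)) // passes, but a failure reports only a number
	require.Empty(t, buf)     // same check; a failure shows the contents
}
```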

@@ -287,7 +287,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
     // we can continue handling.
     rs, _ := ParseWriteResponseStats(httpResp)
-    //nolint:usestdlibvars
     if httpResp.StatusCode/100 == 2 {
         return rs, nil
     }
@@ -297,7 +296,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo
     body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
     err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)
-    //nolint:usestdlibvars
     if httpResp.StatusCode/100 == 5 ||
         (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
         return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
@@ -382,7 +380,6 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
         return nil, fmt.Errorf("error reading response. HTTP status code: %s: %w", httpResp.Status, err)
     }
-    //nolint:usestdlibvars
     if httpResp.StatusCode/100 != 2 {
         return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed)))
     }

@@ -1522,7 +1522,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
     // Send batches of at most MaxSamplesPerSend samples to the remote storage.
     // If we have fewer samples than that, flush them out after a deadline anyways.
     var (
-        max      = s.qm.cfg.MaxSamplesPerSend
+        maxCount = s.qm.cfg.MaxSamplesPerSend
         pBuf     = proto.NewBuffer(nil)
         pBufRaw  []byte
@@ -1530,19 +1530,19 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
     )
     // TODO(@tpaschalis) Should we also raise the max if we have WAL metadata?
     if s.qm.sendExemplars {
-        max += int(float64(max) * 0.1)
+        maxCount += int(float64(maxCount) * 0.1)
     }
     // TODO: Dry all of this, we should make an interface/generic for the timeseries type.
     batchQueue := queue.Chan()
-    pendingData := make([]prompb.TimeSeries, max)
+    pendingData := make([]prompb.TimeSeries, maxCount)
     for i := range pendingData {
         pendingData[i].Samples = []prompb.Sample{{}}
         if s.qm.sendExemplars {
            pendingData[i].Exemplars = []prompb.Exemplar{{}}
         }
     }
-    pendingDataV2 := make([]writev2.TimeSeries, max)
+    pendingDataV2 := make([]writev2.TimeSeries, maxCount)
     for i := range pendingDataV2 {
         pendingDataV2[i].Samples = []writev2.Sample{{}}
     }

@@ -453,10 +453,10 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
             expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
             expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
-            require.Empty(t, len(appendable.samples))
-            require.Empty(t, len(appendable.histograms))
-            require.Empty(t, len(appendable.exemplars))
-            require.Empty(t, len(appendable.metadata))
+            require.Empty(t, appendable.samples)
+            require.Empty(t, appendable.histograms)
+            require.Empty(t, appendable.exemplars)
+            require.Empty(t, appendable.metadata)
             return
         }

@@ -166,7 +166,7 @@ func NewTemplateExpander(
             return html_template.HTML(text)
         },
         "match":     regexp.MatchString,
-        "title":     strings.Title, //nolint:staticcheck
+        "title":     strings.Title,
         "toUpper":   strings.ToUpper,
         "toLower":   strings.ToLower,
         "graphLink": strutil.GraphLinkForExpression,

@@ -69,16 +69,16 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) {
     const maxSize = 500
     const maxIters = 50
-    for max := 1; max < maxSize; max++ {
-        queue := newWriteJobQueue(max, 1+(r.Int()%max))
+    for maxCount := 1; maxCount < maxSize; maxCount++ {
+        queue := newWriteJobQueue(maxCount, 1+(r.Int()%maxCount))
         elements := 0 // total elements in the queue
         lastWriteID := 0
         lastReadID := 0
         for iter := 0; iter < maxIters; iter++ {
-            if elements < max {
-                toWrite := r.Int() % (max - elements)
+            if elements < maxCount {
+                toWrite := r.Int() % (maxCount - elements)
                 if toWrite == 0 {
                     toWrite = 1
                 }

@@ -693,7 +693,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
         return "", err
     }
-    max := uint64(0)
+    maxT := uint64(0)
     lastBlockID := ""
@@ -705,8 +705,8 @@ func (db *DBReadOnly) LastBlockID() (string, error) {
             continue // Not a block dir.
         }
         timestamp := ulidObj.Time()
-        if timestamp > max {
-            max = timestamp
+        if timestamp > maxT {
+            maxT = timestamp
             lastBlockID = dirName
         }
     }
@@ -2316,13 +2316,13 @@ func blockDirs(dir string) ([]string, error) {
     return dirs, nil
 }
-func exponential(d, min, max time.Duration) time.Duration {
+func exponential(d, minD, maxD time.Duration) time.Duration {
     d *= 2
-    if d < min {
-        d = min
+    if d < minD {
+        d = minD
     }
-    if d > max {
-        d = max
+    if d > maxD {
+        d = maxD
     }
     return d
 }
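A usage sketch of the renamed backoff helper, reproduced from the hunk above: doubling with clamping yields the familiar 2s, 4s, ... sequence capped at maxD.

```go
package main

import (
	"fmt"
	"time"
)

// exponential is copied from the diff above: double, then clamp to [minD, maxD].
func exponential(d, minD, maxD time.Duration) time.Duration {
	d *= 2
	if d < minD {
		d = minD
	}
	if d > maxD {
		d = maxD
	}
	return d
}

func main() {
	d := time.Second
	for i := 0; i < 7; i++ {
		d = exponential(d, time.Second, time.Minute)
		fmt.Print(d, " ") // 2s 4s 8s 16s 32s 1m0s 1m0s
	}
	fmt.Println()
}
```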

@@ -4529,8 +4529,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
     addSample := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, _, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             _, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@@ -4566,8 +4566,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
     var series1Samples, series2Samples []chunks.Sample
     for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
         fromMins, toMins := r[0], r[1]
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
             series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
         }
@@ -4645,8 +4645,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSample
     verifySamples := func(block *Block, fromMins, toMins int64) {
         series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
         series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
             series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
         }
@@ -4730,8 +4730,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
     addSamples := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, _, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             _, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@@ -4785,8 +4785,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen
     verifySamples := func(block *Block, fromMins, toMins int64) {
         series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
         series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
             series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
         }
@@ -4839,8 +4839,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
     addSamples := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, _, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             _, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@@ -4894,8 +4894,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen
     verifySamples := func(block *Block, fromMins, toMins int64) {
         series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
         series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
             series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
         }
@@ -4948,8 +4948,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
     addSamples := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, _, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             _, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@@ -4996,8 +4996,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa
     verifySamples := func(fromMins, toMins int64) {
         series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
         series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
             series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2))
         }
@@ -5045,10 +5045,10 @@ func Test_Querier_OOOQuery(t *testing.T) {
     addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) {
         app := db.Appender(context.Background())
         totalAppended := 0
-        for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {
-            _, err := app.Append(0, series1, min, float64(min))
-            if min >= queryMinT && min <= queryMaxT {
-                expSamples = append(expSamples, sample{t: min, f: float64(min)})
+        for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
+            _, err := app.Append(0, series1, m, float64(m))
+            if m >= queryMinT && m <= queryMaxT {
+                expSamples = append(expSamples, sample{t: m, f: float64(m)})
             }
             require.NoError(t, err)
             totalAppended++
@@ -5129,10 +5129,10 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
     addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) {
         app := db.Appender(context.Background())
         totalAppended := 0
-        for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() {
-            _, err := app.Append(0, series1, min, float64(min))
-            if min >= queryMinT && min <= queryMaxT {
-                expSamples = append(expSamples, sample{t: min, f: float64(min)})
+        for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() {
+            _, err := app.Append(0, series1, m, float64(m))
+            if m >= queryMinT && m <= queryMaxT {
+                expSamples = append(expSamples, sample{t: m, f: float64(m)})
             }
             require.NoError(t, err)
             totalAppended++
@@ -5239,9 +5239,9 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
         app := db.Appender(context.Background())
         key := lbls.String()
         from, to := minutes(fromMins), minutes(toMins)
-        for min := from; min <= to; min += time.Minute.Milliseconds() {
+        for m := from; m <= to; m += time.Minute.Milliseconds() {
             val := rand.Intn(1000)
-            _, s, err := scenario.appendFunc(app, lbls, min, int64(val))
+            _, s, err := scenario.appendFunc(app, lbls, m, int64(val))
             if faceError {
                 require.Error(t, err)
             } else {
@@ -5370,14 +5370,14 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
         app := db.Appender(context.Background())
         key := lbls.String()
         from, to := minutes(fromMins), minutes(toMins)
-        for min := from; min <= to; min += time.Minute.Milliseconds() {
-            _, _, err := scenario.appendFunc(app, lbls, min, min)
+        for m := from; m <= to; m += time.Minute.Milliseconds() {
+            _, _, err := scenario.appendFunc(app, lbls, m, m)
             if faceError {
                 require.Error(t, err)
                 failedSamples++
             } else {
                 require.NoError(t, err)
-                expSamples[key] = append(expSamples[key], scenario.sampleFunc(min, min))
+                expSamples[key] = append(expSamples[key], scenario.sampleFunc(m, m))
                 totalSamples++
             }
         }
@@ -5444,9 +5444,9 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
         app := db.Appender(context.Background())
         key := lbls.String()
         from, to := minutes(fromMins), minutes(toMins)
-        for min := from; min <= to; min += time.Minute.Milliseconds() {
+        for m := from; m <= to; m += time.Minute.Milliseconds() {
             val := rand.Intn(1000)
-            _, s, err := scenario.appendFunc(app, lbls, min, int64(val))
+            _, s, err := scenario.appendFunc(app, lbls, m, int64(val))
             require.NoError(t, err)
             expSamples[key] = append(expSamples[key], s)
             totalSamples++
@@ -5635,8 +5635,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
     addSample := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, _, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
         }
@@ -5723,8 +5723,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
     verifySamples := func(block *Block, fromMins, toMins int64) {
         series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
         }
         expRes := map[string][]chunks.Sample{
@@ -5772,8 +5772,8 @@ func TestWBLCorruption(t *testing.T) {
     var allSamples, expAfterRestart []chunks.Sample
     addSamples := func(fromMins, toMins int64, afterRestart bool) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, err := app.Append(0, series1, ts, float64(ts))
             require.NoError(t, err)
             allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
@@ -5926,8 +5926,8 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
     var allSamples, expInMmapChunks []chunks.Sample
     addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, s, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             allSamples = append(allSamples, s)
@@ -6071,8 +6071,8 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
     series1 := labels.FromStrings("foo", "bar1")
     addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
            _, s, err := scenario.appendFunc(app, series1, ts, ts)
            if success {
                require.NoError(t, err)
@@ -6105,7 +6105,7 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
         // WBL is not empty.
         size, err := db.head.wbl.Size()
         require.NoError(t, err)
-        require.Greater(t, size, int64(0))
+        require.Positive(t, size)
         require.Empty(t, db.Blocks())
         require.NoError(t, db.compactOOOHead(ctx))
@@ -6282,8 +6282,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
     series1 := labels.FromStrings("foo", "bar1")
     addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, _, err := scenario.appendFunc(app, series1, ts, ts)
             if success {
                 require.NoError(t, err)
@@ -6296,8 +6296,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
     verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) {
         var expSamples []chunks.Sample
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             expSamples = append(expSamples, scenario.sampleFunc(ts, ts))
         }
@@ -6410,8 +6410,8 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce
     var allSamples []chunks.Sample
     addSamples := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, s, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             allSamples = append(allSamples, s)
@@ -6477,8 +6477,8 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
     var allSamples []chunks.Sample
     addSamples := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, s, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             allSamples = append(allSamples, s)
@@ -6534,8 +6534,8 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari
     var allSamples []chunks.Sample
     addSamples := func(fromMins, toMins int64) {
         app := db.Appender(context.Background())
-        for min := fromMins; min <= toMins; min++ {
-            ts := min * time.Minute.Milliseconds()
+        for m := fromMins; m <= toMins; m++ {
+            ts := m * time.Minute.Milliseconds()
             _, s, err := scenario.appendFunc(app, series1, ts, ts)
             require.NoError(t, err)
             allSamples = append(allSamples, s)

@@ -1403,12 +1403,12 @@ func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o
 // It assumes that the time range is 1/ratioToFull full.
 // Assuming that the samples will keep arriving at the same rate, it will make the
 // remaining n chunks within this chunk range (before max) equally sized.
-func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 {
-    n := float64(max-start) / (float64(cur-start+1) * ratioToFull)
+func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 {
+    n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull)
     if n <= 1 {
-        return max
+        return maxT
     }
-    return int64(float64(start) + float64(max-start)/math.Floor(n))
+    return int64(float64(start) + float64(maxT-start)/math.Floor(n))
 }
 func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk {
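Two worked examples of the renamed function, copied from the hunk above, show how the cut point moves:

```go
package main

import (
	"fmt"
	"math"
)

// Copied from the diff above.
func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 {
	n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull)
	if n <= 1 {
		return maxT
	}
	return int64(float64(start) + float64(maxT-start)/math.Floor(n))
}

func main() {
	// n = 1000 / (101 * 4) ≈ 2.48, floor 2: cut the chunk at 500.
	fmt.Println(computeChunkEndTime(0, 100, 1000, 4)) // 500

	// n = 1000 / (901 * 4) ≈ 0.28 ≤ 1: cut at maxT itself.
	fmt.Println(computeChunkEndTime(0, 900, 1000, 4)) // 1000
}
```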

@@ -2757,7 +2757,7 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {
     require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
     require.NoError(t, db.Compact(ctx))
-    require.Greater(t, db.head.minValidTime.Load(), int64(0))
+    require.Positive(t, db.head.minValidTime.Load())
     app = db.Appender(ctx)
     _, err = appendSample(app, db.head.minValidTime.Load()-2)
@@ -3677,7 +3677,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) {
     require.Len(t, ms.mmappedChunks, 25)
     expMmapChunks := make([]*mmappedChunk, 0, 20)
     for _, mmap := range ms.mmappedChunks {
-        require.Greater(t, mmap.numSamples, uint16(0))
+        require.Positive(t, mmap.numSamples)
         cpy := *mmap
         expMmapChunks = append(expMmapChunks, &cpy)
     }

@@ -20,10 +20,10 @@ import (
 func TestPostingsStats(t *testing.T) {
     stats := &maxHeap{}
-    max := 3000000
-    heapLength := 10
+    const maxCount = 3000000
+    const heapLength = 10
     stats.init(heapLength)
-    for i := 0; i < max; i++ {
+    for i := 0; i < maxCount; i++ {
         item := Stat{
             Name:  "Label-da",
             Count: uint64(i),
@@ -35,13 +35,13 @@ func TestPostingsStats(t *testing.T) {
     data := stats.get()
     require.Len(t, data, 10)
     for i := 0; i < heapLength; i++ {
-        require.Equal(t, uint64(max-i), data[i].Count)
+        require.Equal(t, uint64(maxCount-i), data[i].Count)
     }
 }
 func TestPostingsStats2(t *testing.T) {
     stats := &maxHeap{}
-    heapLength := 10
+    const heapLength = 10
     stats.init(heapLength)
     stats.push(Stat{Name: "Stuff", Count: 10})
@@ -57,12 +57,12 @@ func TestPostingsStats2(t *testing.T) {
 func BenchmarkPostingStatsMaxHep(b *testing.B) {
     stats := &maxHeap{}
-    max := 9000000
-    heapLength := 10
+    const maxCount = 9000000
+    const heapLength = 10
     b.ResetTimer()
     for n := 0; n < b.N; n++ {
         stats.init(heapLength)
-        for i := 0; i < max; i++ {
+        for i := 0; i < maxCount; i++ {
             item := Stat{
                 Name:  "Label-da",
                 Count: uint64(i),

@@ -612,16 +612,16 @@ func (w *WL) setSegment(segment *Segment) error {
 // flushPage writes the new contents of the page to disk. If no more records will fit into
 // the page, the remaining bytes will be set to zero and a new page will be started.
-// If clear is true, this is enforced regardless of how many bytes are left in the page.
-func (w *WL) flushPage(clear bool) error {
+// If forceClear is true, this is enforced regardless of how many bytes are left in the page.
+func (w *WL) flushPage(forceClear bool) error {
     w.metrics.pageFlushes.Inc()
     p := w.page
-    clear = clear || p.full()
+    shouldClear := forceClear || p.full()
     // No more data will fit into the page or an implicit clear.
     // Enqueue and clear it.
-    if clear {
+    if shouldClear {
         p.alloc = pageSize // Write till end of page.
     }
@@ -633,7 +633,7 @@ func (w *WL) flushPage(clear bool) error {
     p.flushed += n
     // We flushed an entire page, prepare a new one.
-    if clear {
+    if shouldClear {
         p.reset()
         w.donePages++
         w.metrics.pageCompletions.Inc()
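Same motivation as the min/max renames: clear became a predeclared identifier in Go 1.21, so a parameter named clear shadows the builtin. Splitting it into forceClear (the input) and shouldClear (the derived flag) also reads better than reassigning a parameter. The builtin, for reference:

```go
package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	clear(m) // builtin since Go 1.21: removes every entry
	fmt.Println(len(m)) // 0

	s := []int{1, 2, 3}
	clear(s) // zeroes the elements; length is unchanged
	fmt.Println(s) // [0 0 0]
}
```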

@@ -155,7 +155,7 @@ func DirHash(t *testing.T, path string) []byte {
         modTime, err := info.ModTime().GobEncode()
         require.NoError(t, err)
-        _, err = io.WriteString(hash, string(modTime))
+        _, err = hash.Write(modTime)
         require.NoError(t, err)
         return nil
     })
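GobEncode already returns a byte slice, so converting it to a string only for io.WriteString to turn it back into bytes was a needless allocation; writing to the hash directly is equivalent. A minimal sketch (sha256 here is an assumption standing in for whatever hash DirHash actually uses):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"time"
)

func main() {
	b, _ := time.Now().GobEncode()

	h := sha256.New()
	// Before: io.WriteString(h, string(b)), which costs an extra conversion.
	h.Write(b) // hash.Hash embeds io.Writer; same bytes, no string round trip
	fmt.Printf("%x\n", h.Sum(nil))
}
```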

@@ -481,14 +481,14 @@ func New(logger log.Logger, o *Options) *Handler {
     router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
         w.WriteHeader(http.StatusOK)
-        fmt.Fprintf(w, o.AppName+" is Healthy.\n")
+        fmt.Fprintf(w, "%s is Healthy.\n", o.AppName)
     })
     router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) {
         w.WriteHeader(http.StatusOK)
     })
     router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
         w.WriteHeader(http.StatusOK)
-        fmt.Fprintf(w, o.AppName+" is Ready.\n")
+        fmt.Fprintf(w, "%s is Ready.\n", o.AppName)
     }))
     router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
         w.WriteHeader(http.StatusOK)
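Passing a value-bearing string as the Fprintf format is a classic bug vector: any % in AppName would be parsed as a formatting verb, and vet-style printf checks flag non-constant format strings. Routing the value through %s fixes both. A demonstration:

```go
package main

import "fmt"

func main() {
	name := "app-100%"

	fmt.Printf(name + " is Healthy.\n")  // "%" starts a verb: output is garbled
	fmt.Printf("%s is Healthy.\n", name) // app-100% is Healthy.
}
```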
