diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
index f7e49ff14..b2aaaea23 100644
--- a/cmd/prometheus/query_log_test.go
+++ b/cmd/prometheus/query_log_test.go
@@ -298,7 +298,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
assert.Equal(t, 1, qc)
} else {
- assert.True(t, qc > 0, "no queries logged")
+ assert.Greater(t, qc, 0, "no queries logged")
}
p.validateLastQuery(t, ql)
@@ -324,7 +324,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
assert.Equal(t, qc, len(ql))
} else {
- assert.True(t, len(ql) > qc, "no queries logged")
+ assert.Greater(t, len(ql), qc, "no queries logged")
}
p.validateLastQuery(t, ql)
qc = len(ql)
@@ -355,7 +355,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
assert.Equal(t, qc, len(ql))
} else {
- assert.True(t, len(ql) > qc, "no queries logged")
+ assert.Greater(t, len(ql), qc, "no queries logged")
}
p.validateLastQuery(t, ql)
@@ -368,7 +368,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
assert.Equal(t, 1, qc)
} else {
- assert.True(t, qc > 0, "no queries logged")
+ assert.Greater(t, qc, 0, "no queries logged")
}
}
diff --git a/config/config_test.go b/config/config_test.go
index f2e1acac7..7830a45dd 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -19,7 +19,6 @@ import (
"net/url"
"path/filepath"
"regexp"
- "strings"
"testing"
"time"
@@ -763,8 +762,8 @@ func TestElideSecrets(t *testing.T) {
yamlConfig := string(config)
matches := secretRe.FindAllStringIndex(yamlConfig, -1)
- assert.True(t, len(matches) == 10, "wrong number of secret matches found")
- assert.True(t, !strings.Contains(yamlConfig, "mysecret"),
+ assert.Equal(t, 10, len(matches), "wrong number of secret matches found")
+ assert.NotContains(t, yamlConfig, "mysecret",
"yaml marshal reveals authentication credentials.")
}
@@ -1027,7 +1026,7 @@ func TestBadConfigs(t *testing.T) {
for _, ee := range expectedErrors {
_, err := LoadFile("testdata/" + ee.filename)
assert.Error(t, err, "%s", ee.filename)
- assert.True(t, strings.Contains(err.Error(), ee.errMsg),
+ assert.Contains(t, err.Error(), ee.errMsg,
"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
}
}
diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go
index eb4058d33..7c42d5c8e 100644
--- a/discovery/consul/consul_test.go
+++ b/discovery/consul/consul_test.go
@@ -275,7 +275,7 @@ func checkOneTarget(t *testing.T, tg []*targetgroup.Group) {
assert.Equal(t, target.Source, string(target.Labels["__meta_consul_service"]))
if target.Source == "test" {
// test service should have one node.
- assert.True(t, len(target.Targets) > 0, "Test service should have one node")
+ assert.Greater(t, len(target.Targets), 0, "Test service should have one node")
}
}
diff --git a/discovery/openstack/hypervisor_test.go b/discovery/openstack/hypervisor_test.go
index 8184a1be4..0878d3fb7 100644
--- a/discovery/openstack/hypervisor_test.go
+++ b/discovery/openstack/hypervisor_test.go
@@ -15,7 +15,6 @@ package openstack
import (
"context"
- "strings"
"testing"
"github.com/prometheus/common/model"
@@ -96,5 +95,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
cancel()
_, err := hypervisor.refresh(ctx)
assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), context.Canceled.Error()), "%q doesn't contain %q", err, context.Canceled)
+ assert.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}
diff --git a/discovery/openstack/instance_test.go b/discovery/openstack/instance_test.go
index 210048e29..90f74dbf7 100644
--- a/discovery/openstack/instance_test.go
+++ b/discovery/openstack/instance_test.go
@@ -16,7 +16,6 @@ package openstack
import (
"context"
"fmt"
- "strings"
"testing"
"github.com/prometheus/common/model"
@@ -135,5 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) {
cancel()
_, err := hypervisor.refresh(ctx)
assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), context.Canceled.Error()), "%q doesn't contain %q", err, context.Canceled)
+ assert.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}
diff --git a/discovery/triton/triton_test.go b/discovery/triton/triton_test.go
index 46ba2fbc1..891c51527 100644
--- a/discovery/triton/triton_test.go
+++ b/discovery/triton/triton_test.go
@@ -87,7 +87,7 @@ func TestTritonSDNew(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, td)
assert.NotNil(t, td.client)
- assert.True(t, td.interval != 0, "")
+ assert.NotZero(t, td.interval)
assert.NotNil(t, td.sdConfig)
assert.Equal(t, conf.Account, td.sdConfig.Account)
assert.Equal(t, conf.DNSSuffix, td.sdConfig.DNSSuffix)
@@ -98,7 +98,7 @@ func TestTritonSDNew(t *testing.T) {
func TestTritonSDNewBadConfig(t *testing.T) {
td, err := newTritonDiscovery(badconf)
assert.Error(t, err)
- assert.True(t, td == nil, "")
+ assert.Nil(t, td)
}
func TestTritonSDNewGroupsConfig(t *testing.T) {
@@ -106,7 +106,7 @@ func TestTritonSDNewGroupsConfig(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, td)
assert.NotNil(t, td.client)
- assert.True(t, td.interval != 0, "")
+ assert.NotZero(t, td.interval)
assert.NotNil(t, td.sdConfig)
assert.Equal(t, groupsconf.Account, td.sdConfig.Account)
assert.Equal(t, groupsconf.DNSSuffix, td.sdConfig.DNSSuffix)
@@ -120,8 +120,8 @@ func TestTritonSDNewCNConfig(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, td)
assert.NotNil(t, td.client)
- assert.True(t, td.interval != 0, "")
- assert.NotNil(t, td.sdConfig)
+ assert.NotZero(t, td.interval)
+ assert.NotZero(t, td.sdConfig)
assert.Equal(t, cnconf.Role, td.sdConfig.Role)
assert.Equal(t, cnconf.Account, td.sdConfig.Account)
assert.Equal(t, cnconf.DNSSuffix, td.sdConfig.DNSSuffix)
@@ -131,7 +131,7 @@ func TestTritonSDNewCNConfig(t *testing.T) {
func TestTritonSDRefreshNoTargets(t *testing.T) {
tgts := testTritonSDRefresh(t, conf, "{\"containers\":[]}")
- assert.True(t, tgts == nil, "")
+ assert.Nil(t, tgts)
}
func TestTritonSDRefreshMultipleTargets(t *testing.T) {
@@ -234,12 +234,12 @@ func testTritonSDRefresh(t *testing.T, c SDConfig, dstr string) []model.LabelSet
host, strport, err := net.SplitHostPort(u.Host)
assert.NoError(t, err)
- assert.True(t, host != "", "")
- assert.True(t, strport != "", "")
+ assert.NotEmpty(t, host)
+ assert.NotEmpty(t, strport)
port, err := strconv.Atoi(strport)
assert.NoError(t, err)
- assert.True(t, port != 0, "")
+ assert.NotZero(t, port)
td.sdConfig.Port = port
diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go
index f0e0b0ff8..de02e608b 100644
--- a/notifier/notifier_test.go
+++ b/notifier/notifier_test.go
@@ -83,7 +83,7 @@ func TestHandlerNextBatch(t *testing.T) {
assert.NoError(t, alertsEqual(expected[0:maxBatchSize], h.nextBatch()))
assert.NoError(t, alertsEqual(expected[maxBatchSize:2*maxBatchSize], h.nextBatch()))
assert.NoError(t, alertsEqual(expected[2*maxBatchSize:], h.nextBatch()))
- assert.True(t, len(h.queue) == 0, "Expected queue to be empty but got %d alerts", len(h.queue))
+ assert.Equal(t, 0, len(h.queue), "Expected queue to be empty but got %d alerts", len(h.queue))
}
func alertsEqual(a, b []*Alert) error {
@@ -201,7 +201,7 @@ func TestHandlerSendAll(t *testing.T) {
checkNoErr()
status2.Store(int32(http.StatusInternalServerError))
- assert.True(t, !h.sendAll(h.queue...), "all sends succeeded unexpectedly")
+ assert.False(t, h.sendAll(h.queue...), "all sends succeeded unexpectedly")
checkNoErr()
}
diff --git a/pkg/labels/labels_test.go b/pkg/labels/labels_test.go
index 9dfb61cfd..5c4c3b637 100644
--- a/pkg/labels/labels_test.go
+++ b/pkg/labels/labels_test.go
@@ -387,10 +387,7 @@ func TestLabels_FromStrings(t *testing.T) {
assert.Equal(t, expected, labels, "unexpected labelset")
- defer func() { recover() }()
- FromStrings("aaa", "111", "bbb")
-
- assert.True(t, false, "did not panic as expected")
+ assert.Panics(t, func() { FromStrings("aaa", "111", "bbb") })
}
func TestLabels_Compare(t *testing.T) {
@@ -640,8 +637,8 @@ func TestLabels_Hash(t *testing.T) {
{Name: "baz", Value: "qux"},
}
assert.Equal(t, lbls.Hash(), lbls.Hash())
- assert.True(t, lbls.Hash() != Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
- assert.True(t, lbls.Hash() != Labels{lbls[0]}.Hash(), "different labels match.")
+ assert.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
+ assert.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
}
var benchmarkLabelsResult uint64
diff --git a/pkg/relabel/relabel_test.go b/pkg/relabel/relabel_test.go
index f002c8c8c..669c64951 100644
--- a/pkg/relabel/relabel_test.go
+++ b/pkg/relabel/relabel_test.go
@@ -440,7 +440,7 @@ func TestTargetLabelValidity(t *testing.T) {
{"foo${bar}foo", true},
}
for _, test := range tests {
- assert.True(t, relabelTarget.Match([]byte(test.str)) == test.valid,
+ assert.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)),
"Expected %q to be %v", test.str, test.valid)
}
}
diff --git a/pkg/textparse/interface.go b/pkg/textparse/interface.go
index cfcd05e21..557e56662 100644
--- a/pkg/textparse/interface.go
+++ b/pkg/textparse/interface.go
@@ -85,12 +85,12 @@ const (
type MetricType string
const (
- MetricTypeCounter = "counter"
- MetricTypeGauge = "gauge"
- MetricTypeHistogram = "histogram"
- MetricTypeGaugeHistogram = "gaugehistogram"
- MetricTypeSummary = "summary"
- MetricTypeInfo = "info"
- MetricTypeStateset = "stateset"
- MetricTypeUnknown = "unknown"
+ MetricTypeCounter = MetricType("counter")
+ MetricTypeGauge = MetricType("gauge")
+ MetricTypeHistogram = MetricType("histogram")
+ MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+ MetricTypeSummary = MetricType("summary")
+ MetricTypeInfo = MetricType("info")
+ MetricTypeStateset = MetricType("stateset")
+ MetricTypeUnknown = MetricType("unknown")
)
diff --git a/promql/engine_test.go b/promql/engine_test.go
index ccd6b61ac..cc43664a0 100644
--- a/promql/engine_test.go
+++ b/promql/engine_test.go
@@ -1129,7 +1129,7 @@ func TestQueryLogger_basic(t *testing.T) {
assert.Equal(t, 2*l, len(f1.logs))
// Test that we close the query logger when unsetting it.
- assert.True(t, !f1.closed, "expected f1 to be open, got closed")
+ assert.False(t, f1.closed, "expected f1 to be open, got closed")
engine.SetQueryLogger(nil)
assert.True(t, f1.closed, "expected f1 to be closed, got open")
queryExec()
@@ -1138,11 +1138,11 @@ func TestQueryLogger_basic(t *testing.T) {
f2 := NewFakeQueryLogger()
f3 := NewFakeQueryLogger()
engine.SetQueryLogger(f2)
- assert.True(t, !f2.closed, "expected f2 to be open, got closed")
+ assert.False(t, f2.closed, "expected f2 to be open, got closed")
queryExec()
engine.SetQueryLogger(f3)
assert.True(t, f2.closed, "expected f2 to be closed, got open")
- assert.True(t, !f3.closed, "expected f3 to be open, got closed")
+ assert.False(t, f3.closed, "expected f3 to be open, got closed")
queryExec()
}
diff --git a/promql/functions_test.go b/promql/functions_test.go
index 99816ee54..825f00edb 100644
--- a/promql/functions_test.go
+++ b/promql/functions_test.go
@@ -15,7 +15,6 @@ package promql
import (
"context"
- "fmt"
"testing"
"time"
@@ -56,19 +55,19 @@ func TestDeriv(t *testing.T) {
assert.NoError(t, result.Err)
vec, _ := result.Vector()
- assert.True(t, len(vec) == 1, "Expected 1 result, got %d", len(vec))
- assert.True(t, vec[0].V == 0.0, "Expected 0.0 as value, got %f", vec[0].V)
+ assert.Equal(t, 1, len(vec), "Expected 1 result, got %d", len(vec))
+ assert.Equal(t, 0.0, vec[0].V, "Expected 0.0 as value, got %f", vec[0].V)
}
func TestFunctionList(t *testing.T) {
// Test that Functions and parser.Functions list the same functions.
for i := range FunctionCalls {
_, ok := parser.Functions[i]
- assert.True(t, ok, fmt.Sprintf("function %s exists in promql package, but not in parser package", i))
+ assert.True(t, ok, "function %s exists in promql package, but not in parser package", i)
}
for i := range parser.Functions {
_, ok := FunctionCalls[i]
- assert.True(t, ok, (fmt.Sprintf("function %s exists in parser package, but not in promql package", i)))
+ assert.True(t, ok, "function %s exists in parser package, but not in promql package", i)
}
}
diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go
index f9340e375..e28a136c7 100644
--- a/promql/parser/parse_test.go
+++ b/promql/parser/parse_test.go
@@ -15,7 +15,6 @@ package parser
import (
"math"
- "strings"
"testing"
"time"
@@ -2659,14 +2658,14 @@ func TestParseExpressions(t *testing.T) {
expr, err := ParseExpr(test.input)
// Unexpected errors are always caused by a bug.
- assert.True(t, err != errUnexpected, "unexpected error occurred")
+ assert.NotEqual(t, err, errUnexpected, "unexpected error occurred")
if !test.fail {
assert.NoError(t, err)
assert.Equal(t, test.expected, expr, "error on input '%s'", test.input)
} else {
assert.Error(t, err)
- assert.True(t, strings.Contains(err.Error(), test.errMsg), "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
+ assert.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
errorList, ok := err.(ParseErrors)
@@ -2804,7 +2803,7 @@ func TestParseSeries(t *testing.T) {
metric, vals, err := ParseSeriesDesc(test.input)
// Unexpected errors are always caused by a bug.
- assert.True(t, err != errUnexpected, "unexpected error occurred")
+ assert.NotEqual(t, err, errUnexpected, "unexpected error occurred")
if !test.fail {
assert.NoError(t, err)
diff --git a/promql/test_test.go b/promql/test_test.go
index 158f161d0..8c109b427 100644
--- a/promql/test_test.go
+++ b/promql/test_test.go
@@ -134,9 +134,9 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
// Get the series for the matcher.
ss := querier.Select(false, nil, matchers...)
- assert.True(t, ss.Next(), "")
+ assert.True(t, ss.Next())
storageSeries := ss.At()
- assert.True(t, !ss.Next(), "Expecting only 1 series")
+ assert.False(t, ss.Next(), "Expecting only 1 series")
// Convert `storage.Series` to `promql.Series`.
got := Series{
diff --git a/rules/alerting_test.go b/rules/alerting_test.go
index dce0302fa..ce5959155 100644
--- a/rules/alerting_test.go
+++ b/rules/alerting_test.go
@@ -15,6 +15,7 @@ package rules
import (
"context"
+ "html/template"
"testing"
"time"
@@ -33,16 +34,16 @@ func TestAlertingRuleHTMLSnippet(t *testing.T) {
assert.NoError(t, err)
rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "BOLD"), labels.FromStrings("html", "BOLD"), nil, false, nil)
- const want = `alert: testrule
+ const want = template.HTML(`alert: testrule
expr: foo{html="<b>BOLD<b>"}
labels:
html: '<b>BOLD</b>'
annotations:
html: '<b>BOLD</b>'
-`
+`)
got := rule.HTMLSnippet("/test/prefix")
- assert.True(t, want == got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
+ assert.Equal(t, want, got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
}
func TestAlertingRuleState(t *testing.T) {
@@ -81,7 +82,7 @@ func TestAlertingRuleState(t *testing.T) {
rule := NewAlertingRule(test.name, nil, 0, nil, nil, nil, true, nil)
rule.active = test.active
got := rule.State()
- assert.True(t, test.want == got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
+ assert.Equal(t, test.want, got, "test case %d unexpected AlertState, want:%d got:%d", i, test.want, got)
}
}
diff --git a/rules/manager_test.go b/rules/manager_test.go
index 753fbaf87..c85a9b55c 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -172,7 +172,7 @@ func TestAlertingRule(t *testing.T) {
for i := range test.result {
test.result[i].T = timestamp.FromTime(evalTime)
}
- assert.True(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+ assert.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@@ -180,7 +180,7 @@ func TestAlertingRule(t *testing.T) {
assert.Equal(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
- assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+ assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
}
@@ -325,7 +325,7 @@ func TestForStateAddSamples(t *testing.T) {
test.result[i].V = forState
}
}
- assert.True(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+ assert.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
sort.Slice(filteredRes, func(i, j int) bool {
return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@@ -333,7 +333,7 @@ func TestForStateAddSamples(t *testing.T) {
assert.Equal(t, test.result, filteredRes)
for _, aa := range rule.ActiveAlerts() {
- assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+ assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
}
@@ -402,7 +402,7 @@ func TestForStateRestore(t *testing.T) {
exp := rule.ActiveAlerts()
for _, aa := range exp {
- assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+ assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(exp, func(i, j int) bool {
return labels.Compare(exp[i].Labels, exp[j].Labels) < 0
@@ -466,7 +466,7 @@ func TestForStateRestore(t *testing.T) {
got := newRule.ActiveAlerts()
for _, aa := range got {
- assert.True(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+ assert.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
}
sort.Slice(got, func(i, j int) bool {
return labels.Compare(got[i].Labels, got[j].Labels) < 0
@@ -494,7 +494,7 @@ func TestForStateRestore(t *testing.T) {
// Difference in time should be within 1e6 ns, i.e. 1ms
// (due to conversion between ns & ms, float64 & int64).
activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix())
- assert.True(t, math.Abs(activeAtDiff) == 0, "'for' state restored time is wrong")
+ assert.Equal(t, 0.0, math.Abs(activeAtDiff), "'for' state restored time is wrong")
}
}
}
@@ -727,7 +727,7 @@ func TestUpdate(t *testing.T) {
err := ruleManager.Update(10*time.Second, files, nil)
assert.NoError(t, err)
- assert.True(t, len(ruleManager.groups) > 0, "expected non-empty rule groups")
+ assert.Greater(t, len(ruleManager.groups), 0, "expected non-empty rule groups")
ogs := map[string]*Group{}
for h, g := range ruleManager.groups {
g.seriesInPreviousEval = []map[string]labels.Labels{
@@ -748,7 +748,7 @@ func TestUpdate(t *testing.T) {
// Groups will be recreated if updated.
rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
- assert.True(t, len(errs) == 0, "file parsing failures")
+ assert.Equal(t, 0, len(errs), "file parsing failures")
tmpFile, err := ioutil.TempFile("", "rules.test.*.yaml")
assert.NoError(t, err)
@@ -885,7 +885,7 @@ func TestNotify(t *testing.T) {
// Alert sent right away
group.Eval(ctx, time.Unix(1, 0))
assert.Equal(t, 1, len(lastNotified))
- assert.True(t, !lastNotified[0].ValidUntil.IsZero(), "ValidUntil should not be zero")
+ assert.NotZero(t, lastNotified[0].ValidUntil, "ValidUntil should not be zero")
// Alert is not sent 1s later
group.Eval(ctx, time.Unix(2, 0))
@@ -1160,6 +1160,6 @@ func TestGroupHasAlertingRules(t *testing.T) {
for i, test := range tests {
got := test.group.HasAlertingRules()
- assert.True(t, test.want == got, "test case %d failed, expected:%t got:%t", i, test.want, got)
+ assert.Equal(t, test.want, got, "test case %d failed, expected:%t got:%t", i, test.want, got)
}
}
diff --git a/rules/recording_test.go b/rules/recording_test.go
index 274944cfa..fd0f59008 100644
--- a/rules/recording_test.go
+++ b/rules/recording_test.go
@@ -15,6 +15,7 @@ package rules
import (
"context"
+ "html/template"
"testing"
"time"
@@ -83,14 +84,14 @@ func TestRecordingRuleHTMLSnippet(t *testing.T) {
assert.NoError(t, err)
rule := NewRecordingRule("testrule", expr, labels.FromStrings("html", "BOLD"))
- const want = `record: testrule
+ const want = template.HTML(`record: testrule
expr: foo{html="<b>BOLD<b>"}
labels:
html: '<b>BOLD</b>'
-`
+`)
got := rule.HTMLSnippet("/test/prefix")
- assert.True(t, want == got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
+ assert.Equal(t, want, got, "incorrect HTML snippet; want:\n\n%s\n\ngot:\n\n%s", want, got)
}
// TestRuleEvalDuplicate tests for duplicate labels in recorded metrics, see #5529.
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index 6845d3b7b..cd7203539 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -231,8 +231,8 @@ func TestScrapePoolStop(t *testing.T) {
assert.Equal(t, numTargets, len(stopped), "Unexpected number of stopped loops")
mtx.Unlock()
- assert.True(t, len(sp.activeTargets) == 0, "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
- assert.True(t, len(sp.loops) == 0, "Loops were not cleared on stopping: %d left", len(sp.loops))
+ assert.Equal(t, 0, len(sp.activeTargets), "Targets were not cleared on stopping: %d left", len(sp.activeTargets))
+ assert.Equal(t, 0, len(sp.loops), "Loops were not cleared on stopping: %d left", len(sp.loops))
}
func TestScrapePoolReload(t *testing.T) {
@@ -872,19 +872,19 @@ test_metric 1
md, ok := cache.GetMetadata("test_metric")
assert.True(t, ok, "expected metadata to be present")
- assert.True(t, textparse.MetricTypeCounter == md.Type, "unexpected metric type")
+ assert.Equal(t, textparse.MetricTypeCounter, md.Type, "unexpected metric type")
assert.Equal(t, "some help text", md.Help)
assert.Equal(t, "metric", md.Unit)
md, ok = cache.GetMetadata("test_metric_no_help")
assert.True(t, ok, "expected metadata to be present")
- assert.True(t, textparse.MetricTypeGauge == md.Type, "unexpected metric type")
+ assert.Equal(t, textparse.MetricTypeGauge, md.Type, "unexpected metric type")
assert.Equal(t, "", md.Help)
assert.Equal(t, "", md.Unit)
md, ok = cache.GetMetadata("test_metric_no_type")
assert.True(t, ok, "expected metadata to be present")
- assert.True(t, textparse.MetricTypeUnknown == md.Type, "unexpected metric type")
+ assert.Equal(t, textparse.MetricTypeUnknown, md.Type, "unexpected metric type")
assert.Equal(t, "other help text", md.Help)
assert.Equal(t, "", md.Unit)
}
@@ -1352,7 +1352,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
value := metric.GetCounter().GetValue()
change := value - beforeMetricValue
- assert.True(t, change == 1, "Unexpected change of sample limit metric: %f", change)
+ assert.Equal(t, 1.0, change, "Unexpected change of sample limit metric: %f", change)
// And verify that we got the samples that fit under the limit.
want := []sample{
@@ -1765,7 +1765,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
}
_, err = ts.scrape(context.Background(), ioutil.Discard)
- assert.True(t, strings.Contains(err.Error(), "404"), "Expected \"404 NotFound\" error but got: %s", err)
+ assert.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err)
}
// testScraper implements the scraper interface and allows setting values
@@ -2118,15 +2118,15 @@ func TestReuseScrapeCache(t *testing.T) {
sp.reload(s.newConfig)
for fp, newCacheAddr := range cacheAddr(sp) {
if s.keep {
- assert.True(t, initCacheAddr[fp] == newCacheAddr, "step %d: old cache and new cache are not the same", i)
+ assert.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are not the same", i)
} else {
- assert.True(t, initCacheAddr[fp] != newCacheAddr, "step %d: old cache and new cache are the same", i)
+ assert.NotEqual(t, initCacheAddr[fp], newCacheAddr, "step %d: old cache and new cache are the same", i)
}
}
initCacheAddr = cacheAddr(sp)
sp.reload(s.newConfig)
for fp, newCacheAddr := range cacheAddr(sp) {
- assert.True(t, initCacheAddr[fp] == newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
+ assert.Equal(t, initCacheAddr[fp], newCacheAddr, "step %d: reloading the exact config invalidates the cache", i)
}
}
}
diff --git a/storage/buffer_test.go b/storage/buffer_test.go
index 76501052a..acbd69520 100644
--- a/storage/buffer_test.go
+++ b/storage/buffer_test.go
@@ -77,9 +77,9 @@ func TestSampleRing(t *testing.T) {
}
if found {
- assert.True(t, sold.t >= s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
+ assert.GreaterOrEqual(t, sold.t, s.t-c.delta, "%d: unexpected sample %d in buffer; buffer %v", i, sold.t, buffered)
} else {
- assert.True(t, sold.t < s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
+ assert.Less(t, sold.t, s.t-c.delta, "%d: expected sample %d to be in buffer but was not; buffer %v", i, sold.t, buffered)
}
}
}
@@ -137,7 +137,7 @@ func TestBufferedSeriesIterator(t *testing.T) {
sampleEq(101, 10)
bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
- assert.True(t, !it.Next(), "next succeeded unexpectedly")
+ assert.False(t, it.Next(), "next succeeded unexpectedly")
}
// At() should not be called once Next() returns false.
@@ -147,7 +147,7 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) {
m := &mockSeriesIterator{
seek: func(int64) bool { return false },
at: func() (int64, float64) {
- assert.True(t, !done, "unexpectedly done")
+ assert.False(t, done, "unexpectedly done")
done = true
return 0, 0
},
diff --git a/storage/fanout_test.go b/storage/fanout_test.go
index 14519aef3..1c1657a1e 100644
--- a/storage/fanout_test.go
+++ b/storage/fanout_test.go
@@ -174,7 +174,7 @@ func TestFanoutErrors(t *testing.T) {
}
if tc.warning != nil {
- assert.True(t, len(ss.Warnings()) > 0, "warnings expected")
+ assert.Greater(t, len(ss.Warnings()), 0, "warnings expected")
assert.Error(t, ss.Warnings()[0])
assert.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
}
@@ -199,7 +199,7 @@ func TestFanoutErrors(t *testing.T) {
}
if tc.warning != nil {
- assert.True(t, len(ss.Warnings()) > 0, "warnings expected")
+ assert.Greater(t, len(ss.Warnings()), 0, "warnings expected")
assert.Error(t, ss.Warnings()[0])
assert.Equal(t, tc.warning.Error(), ss.Warnings()[0].Error())
}
diff --git a/storage/merge_test.go b/storage/merge_test.go
index 55d1ae623..bf851668d 100644
--- a/storage/merge_test.go
+++ b/storage/merge_test.go
@@ -206,7 +206,7 @@ func TestMergeQuerierWithChainMerger(t *testing.T) {
assert.Equal(t, expErr, actErr)
assert.Equal(t, expSmpl, actSmpl)
}
- assert.True(t, !tc.expected.Next(), "Expected Next() to be false")
+ assert.False(t, tc.expected.Next(), "Expected Next() to be false")
})
}
}
@@ -376,7 +376,7 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
}
assert.NoError(t, merged.Err())
- assert.True(t, !tc.expected.Next(), "Expected Next() to be false")
+ assert.False(t, tc.expected.Next(), "Expected Next() to be false")
})
}
}
diff --git a/storage/remote/chunked_test.go b/storage/remote/chunked_test.go
index cf56ca8f7..55340dee6 100644
--- a/storage/remote/chunked_test.go
+++ b/storage/remote/chunked_test.go
@@ -53,7 +53,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
for ; i < 4; i++ {
msg, err := r.Next()
assert.NoError(t, err)
- assert.True(t, i < len(msgs), "more messages then expected")
+ assert.Less(t, i, len(msgs), "more messages than expected")
assert.Equal(t, msgs[i], msg)
}
@@ -62,7 +62,7 @@ func TestChunkedReaderCanReadFromChunkedWriter(t *testing.T) {
msg, err := r.Next()
assert.NoError(t, err)
- assert.True(t, i < len(msgs), "more messages then expected")
+ assert.Less(t, i, len(msgs), "more messages then expected")
assert.Equal(t, msgs[i], msg)
_, err = r.Next()
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index 8d7af46ea..e3f6d44ae 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -140,7 +140,7 @@ func TestConcreteSeriesSet(t *testing.T) {
assert.Equal(t, series1, c.At(), "Unexpected series returned.")
assert.True(t, c.Next(), "Expected Next() to be true.")
assert.Equal(t, series2, c.At(), "Unexpected series returned.")
- assert.True(t, !c.Next(), "Expected Next() to be false.")
+ assert.False(t, c.Next(), "Expected Next() to be false.")
}
func TestConcreteSeriesClonesLabels(t *testing.T) {
@@ -185,7 +185,7 @@ func TestFromQueryResultWithDuplicates(t *testing.T) {
assert.True(t, isErrSeriesSet, "Expected resulting series to be an errSeriesSet")
errMessage := errSeries.Err().Error()
- assert.True(t, errMessage == "duplicate label with name: foo", fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
+ assert.Equal(t, "duplicate label with name: foo", errMessage, fmt.Sprintf("Expected error to be from duplicate label, but got: %s", errMessage))
}
func TestNegotiateResponseType(t *testing.T) {
diff --git a/storage/remote/intern_test.go b/storage/remote/intern_test.go
index bffcc4260..3102645c0 100644
--- a/storage/remote/intern_test.go
+++ b/storage/remote/intern_test.go
@@ -33,7 +33,7 @@ func TestIntern(t *testing.T) {
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
- assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+ assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}
func TestIntern_MultiRef(t *testing.T) {
@@ -44,13 +44,13 @@ func TestIntern_MultiRef(t *testing.T) {
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
- assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+ assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.intern(testString)
interned, ok = interner.pool[testString]
assert.Equal(t, true, ok)
- assert.True(t, interned.refs.Load() == 2, fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
+ assert.Equal(t, int64(2), interned.refs.Load(), fmt.Sprintf("expected refs to be 2 but it was %d", interned.refs.Load()))
}
func TestIntern_DeleteRef(t *testing.T) {
@@ -61,7 +61,7 @@ func TestIntern_DeleteRef(t *testing.T) {
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
- assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+ assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
interner.release(testString)
_, ok = interner.pool[testString]
@@ -75,7 +75,7 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interner.intern(testString)
interned, ok := interner.pool[testString]
assert.Equal(t, true, ok)
- assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+ assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
go interner.release(testString)
@@ -87,5 +87,5 @@ func TestIntern_MultiRef_Concurrent(t *testing.T) {
interned, ok = interner.pool[testString]
interner.mtx.RUnlock()
assert.Equal(t, true, ok)
- assert.True(t, interned.refs.Load() == 1, fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
+ assert.Equal(t, int64(1), interned.refs.Load(), fmt.Sprintf("expected refs to be 1 but it was %d", interned.refs.Load()))
}
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 2a9d55fa3..b9a1abc24 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -334,7 +334,7 @@ func TestReleaseNoninternedString(t *testing.T) {
}
metric := client_testutil.ToFloat64(noReferenceReleases)
- assert.True(t, metric == 0, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
+ assert.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
}
func TestShouldReshard(t *testing.T) {
@@ -725,10 +725,10 @@ func TestCalculateDesiredShards(t *testing.T) {
t.Log("desiredShards", m.numShards, "pendingSamples", pendingSamples)
m.numShards = m.calculateDesiredShards()
- assert.True(t, m.numShards >= minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
- assert.True(t, m.numShards <= maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
+ assert.GreaterOrEqual(t, m.numShards, minShards, "Shards are too low. desiredShards=%d, minShards=%d, t_seconds=%d", m.numShards, minShards, ts/time.Second)
+ assert.LessOrEqual(t, m.numShards, maxShards, "Shards are too high. desiredShards=%d, maxShards=%d, t_seconds=%d", m.numShards, maxShards, ts/time.Second)
}
- assert.True(t, pendingSamples == 0, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
+ assert.Equal(t, int64(0), pendingSamples, "Remote write never caught up, there are still %d pending samples.", pendingSamples)
}
func TestQueueManagerMetrics(t *testing.T) {
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index abf88fab8..22ae6f528 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -362,12 +362,12 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
assert.Equal(t, 3, len(s.queues))
_, hashExists := s.queues[hashes[0]]
- assert.True(t, !hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
+ assert.False(t, hashExists, "The queue for the first remote write configuration should have been restarted because the relabel configuration has changed.")
q, hashExists := s.queues[hashes[1]]
assert.True(t, hashExists, "Hash of unchanged queue should have remained the same")
- assert.True(t, q == queues[1], "Pointer of unchanged queue should have remained the same")
+ assert.Equal(t, q, queues[1], "Pointer of unchanged queue should have remained the same")
_, hashExists = s.queues[hashes[2]]
- assert.True(t, !hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.")
+ assert.False(t, hashExists, "The queue for the third remote write configuration should have been restarted because the timeout has changed.")
storeHashes()
secondClient := s.queues[hashes[1]].client()
@@ -381,7 +381,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
q, hashExists = s.queues[hashes[1]]
assert.True(t, hashExists, "Hash of queue with secret change should have remained the same")
- assert.True(t, secondClient != q.client(), "Pointer of a client with a secret change should not be the same")
+ assert.NotEqual(t, secondClient, q.client(), "Pointer of a client with a secret change should not be the same")
_, hashExists = s.queues[hashes[2]]
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
@@ -395,7 +395,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
assert.Equal(t, 2, len(s.queues))
_, hashExists = s.queues[hashes[0]]
- assert.True(t, !hashExists, "If a config is removed, the queue should be stopped and recreated.")
+ assert.False(t, hashExists, "If a config is removed, the queue should be stopped and recreated.")
_, hashExists = s.queues[hashes[1]]
assert.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
_, hashExists = s.queues[hashes[2]]
diff --git a/tsdb/block_test.go b/tsdb/block_test.go
index d59a47b71..7216590e4 100644
--- a/tsdb/block_test.go
+++ b/tsdb/block_test.go
@@ -51,7 +51,7 @@ func TestBlockMetaMustNeverBeVersion2(t *testing.T) {
meta, _, err := readMetaFile(dir)
assert.NoError(t, err)
- assert.True(t, meta.Version != 2, "meta.json version must never be 2")
+ assert.NotEqual(t, 2, meta.Version, "meta.json version must never be 2")
}
func TestSetCompactionFailed(t *testing.T) {
@@ -181,7 +181,7 @@ func TestCorruptedChunk(t *testing.T) {
blockDir := createBlock(t, tmpdir, []storage.Series{series})
files, err := sequenceFiles(chunkDir(blockDir))
assert.NoError(t, err)
- assert.True(t, len(files) > 0, "No chunk created.")
+ assert.Greater(t, len(files), 0, "No chunk created.")
f, err := os.OpenFile(files[0], os.O_RDWR, 0666)
assert.NoError(t, err)
@@ -204,7 +204,7 @@ func TestCorruptedChunk(t *testing.T) {
set := querier.Select(false, nil, labels.MustNewMatcher(labels.MatchEqual, "a", "b"))
// Check chunk errors during iter time.
- assert.True(t, set.Next(), "")
+ assert.True(t, set.Next())
it := set.At().Iterator()
assert.Equal(t, false, it.Next())
assert.Equal(t, tc.iterErr.Error(), it.Err().Error())
@@ -244,7 +244,7 @@ func TestBlockSize(t *testing.T) {
{
assert.NoError(t, blockInit.Delete(1, 10, labels.MustNewMatcher(labels.MatchRegexp, "", ".*")))
expAfterDelete := blockInit.Size()
- assert.True(t, expAfterDelete > expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
+ assert.Greater(t, expAfterDelete, expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
actAfterDelete, err := fileutil.DirSize(blockDirInit)
assert.NoError(t, err)
assert.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
@@ -261,7 +261,7 @@ func TestBlockSize(t *testing.T) {
expAfterCompact := blockAfterCompact.Size()
actAfterCompact, err := fileutil.DirSize(blockAfterCompact.Dir())
assert.NoError(t, err)
- assert.True(t, actAfterDelete > actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
+ assert.Greater(t, actAfterDelete, actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
assert.Equal(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
}
}
diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go
index cc8881722..5d3e36574 100644
--- a/tsdb/chunks/head_chunks_test.go
+++ b/tsdb/chunks/head_chunks_test.go
@@ -104,7 +104,8 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
}
// Checking on-disk bytes for the first file.
- assert.True(t, len(hrw.mmappedChunkFiles) == 3 && len(hrw.closers) == 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
+ assert.Equal(t, 3, len(hrw.mmappedChunkFiles), "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
+ assert.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
actualBytes, err := ioutil.ReadFile(firstFileName)
assert.NoError(t, err)
@@ -225,9 +226,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
- assert.True(t, !hrw.fileMaxtSet, "")
+ assert.False(t, hrw.fileMaxtSet)
assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
- assert.True(t, hrw.fileMaxtSet, "")
+ assert.True(t, hrw.fileMaxtSet)
verifyFiles([]int{3, 4, 5, 6, 7, 8})
// New file is created after restart even if last file was empty.
@@ -395,14 +396,14 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
// Open chunk disk mapper again, corrupt file should be removed.
hrw, err = NewChunkDiskMapper(dir, chunkenc.NewPool())
assert.NoError(t, err)
- assert.True(t, !hrw.fileMaxtSet, "")
+ assert.False(t, hrw.fileMaxtSet)
assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
- assert.True(t, hrw.fileMaxtSet, "")
+ assert.True(t, hrw.fileMaxtSet)
// Removed from memory.
assert.Equal(t, 3, len(hrw.mmappedChunkFiles))
for idx := range hrw.mmappedChunkFiles {
- assert.True(t, idx <= lastFile, "file index is bigger than previous last file")
+ assert.LessOrEqual(t, idx, lastFile, "file index is bigger than previous last file")
}
// Removed even from disk.
@@ -412,7 +413,7 @@ func TestHeadReadWriter_ReadRepairOnEmptyLastFile(t *testing.T) {
for _, fi := range files {
seq, err := strconv.ParseUint(fi.Name(), 10, 64)
assert.NoError(t, err)
- assert.True(t, seq <= uint64(lastFile), "file index on disk is bigger than previous last file")
+ assert.LessOrEqual(t, seq, uint64(lastFile), "file index on disk is bigger than previous last file")
}
}
@@ -426,9 +427,9 @@ func testChunkDiskMapper(t *testing.T) *ChunkDiskMapper {
hrw, err := NewChunkDiskMapper(tmpdir, chunkenc.NewPool())
assert.NoError(t, err)
- assert.True(t, !hrw.fileMaxtSet, "")
+ assert.False(t, hrw.fileMaxtSet)
assert.NoError(t, hrw.IterateAllChunks(func(_, _ uint64, _, _ int64, _ uint16) error { return nil }))
- assert.True(t, hrw.fileMaxtSet, "")
+ assert.True(t, hrw.fileMaxtSet)
return hrw
}
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 25d492b01..bef42eefa 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1150,7 +1150,7 @@ func TestDisableAutoCompactions(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
- assert.True(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped) > 0.0, "No compaction was skipped after the set timeout.")
+ assert.Greater(t, prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 0.0, "No compaction was skipped after the set timeout.")
assert.Equal(t, 0, len(db.blocks))
// Enable the compaction, trigger it and check that the block is persisted.
@@ -1165,7 +1165,7 @@ func TestDisableAutoCompactions(t *testing.T) {
}
time.Sleep(100 * time.Millisecond)
}
- assert.True(t, len(db.Blocks()) > 0, "No block was persisted after the set timeout.")
+ assert.Greater(t, len(db.Blocks()), 0, "No block was persisted after the set timeout.")
}
// TestCancelCompactions ensures that when the db is closed
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 7e6a93fde..2e0a1f58c 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -304,7 +304,7 @@ func TestDBAppenderAddRef(t *testing.T) {
ref2, err := app2.Add(labels.FromStrings("a", "b"), 133, 1)
assert.NoError(t, err)
- assert.True(t, ref1 == ref2, "")
+ assert.Equal(t, ref1, ref2)
// Reference must be valid to add another sample.
err = app2.AddFast(ref2, 143, 2)
@@ -719,7 +719,7 @@ Outer:
})
if len(expSamples) == 0 {
- assert.True(t, res.Next() == false, "")
+ assert.False(t, res.Next())
continue
}
@@ -948,7 +948,7 @@ func TestWALSegmentSizeOptions(t *testing.T) {
assert.Equal(t, int64(DefaultOptions().WALSegmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
}
lastFile := files[len(files)-1]
- assert.True(t, int64(DefaultOptions().WALSegmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
+ assert.Greater(t, int64(DefaultOptions().WALSegmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
// Custom Wal Size.
2 * 32 * 1024: func(dbDir string, segmentSize int) {
@@ -960,13 +960,13 @@ func TestWALSegmentSizeOptions(t *testing.T) {
files = append(files, f)
}
}
- assert.True(t, len(files) > 1, "current WALSegmentSize should result in more than a single WAL file.")
+ assert.Greater(t, len(files), 1, "current WALSegmentSize should result in more than a single WAL file.")
// All the full segment files (all but the last) should match the segment size option.
for _, f := range files[:len(files)-1] {
assert.Equal(t, int64(segmentSize), f.Size(), "WAL file size doesn't match WALSegmentSize option, filename: %v", f.Name())
}
lastFile := files[len(files)-1]
- assert.True(t, int64(segmentSize) > lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
+ assert.Greater(t, int64(segmentSize), lastFile.Size(), "last WAL file size is not smaller than the WALSegmentSize option, filename: %v", lastFile.Name())
},
// Wal disabled.
-1: func(dbDir string, segmentSize int) {
@@ -1069,7 +1069,7 @@ func TestTombstoneClean(t *testing.T) {
})
if len(expSamples) == 0 {
- assert.True(t, res.Next() == false, "")
+ assert.False(t, res.Next())
continue
}
@@ -1295,7 +1295,7 @@ func TestSizeRetention(t *testing.T) {
assert.Equal(t, 1, actRetentionCount, "metric retention count mismatch")
assert.Equal(t, actSize, expSize, "metric db size doesn't match actual disk size")
- assert.True(t, expSize <= sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
+ assert.LessOrEqual(t, expSize, sizeLimit, "actual size (%v) is expected to be less than or equal to limit (%v)", expSize, sizeLimit)
assert.Equal(t, len(blocks)-1, len(actBlocks), "new block count should be decreased from:%v to:%v", len(blocks), len(blocks)-1)
assert.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime, "maxT mismatch of the first block")
assert.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime, "maxT mismatch of the last block")
@@ -1425,7 +1425,7 @@ func TestOverlappingBlocksDetectsAllOverlaps(t *testing.T) {
metas[i] = BlockMeta{MinTime: int64(i * 10), MaxTime: int64((i + 1) * 10)}
}
- assert.True(t, len(OverlappingBlocks(metas)) == 0, "we found unexpected overlaps")
+ assert.Equal(t, 0, len(OverlappingBlocks(metas)), "we found unexpected overlaps")
// Add overlapping blocks. We've to establish order again since we aren't interested
// in trivial overlaps caused by unorderedness.
@@ -1563,7 +1563,7 @@ func TestChunkAtBlockBoundary(t *testing.T) {
chunkCount++
}
}
- assert.True(t, chunkCount == 1, "expected 1 chunk in block %s, got %d", meta.ULID, chunkCount)
+ assert.Equal(t, 1, chunkCount, "expected 1 chunk in block %s, got %d", meta.ULID, chunkCount)
}
}
@@ -1592,7 +1592,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
err = db.Compact()
assert.NoError(t, err)
- assert.True(t, len(db.blocks) >= 3, "invalid test, less than three blocks in DB")
+ assert.GreaterOrEqual(t, len(db.blocks), 3, "invalid test, less than three blocks in DB")
q, err := db.Querier(context.TODO(), blockRange, 2*blockRange)
assert.NoError(t, err)
@@ -1764,7 +1764,7 @@ func TestNoEmptyBlocks(t *testing.T) {
app = db.Appender(ctx)
_, err = app.Add(defaultLabel, 1, 0)
- assert.True(t, err == storage.ErrOutOfBounds, "the head should be truncated so no samples in the past should be allowed")
+ assert.Equal(t, storage.ErrOutOfBounds, err, "the head should be truncated so no samples in the past should be allowed")
// Adding new blocks.
currentTime := db.Head().MaxTime()
@@ -1781,7 +1781,7 @@ func TestNoEmptyBlocks(t *testing.T) {
actBlocks, err = blockDirs(db.Dir())
assert.NoError(t, err)
assert.Equal(t, len(db.Blocks()), len(actBlocks))
- assert.True(t, len(actBlocks) == 1, "No blocks created when compacting with >0 samples")
+ assert.Equal(t, 1, len(actBlocks), "No blocks created when compacting with >0 samples")
})
t.Run(`When no new block is created from head, and there are some blocks on disk
@@ -2120,7 +2120,7 @@ func TestDBReadOnly(t *testing.T) {
expBlocks = dbWritable.Blocks()
expDbSize, err := fileutil.DirSize(dbWritable.Dir())
assert.NoError(t, err)
- assert.True(t, expDbSize > dbSizeBeforeAppend, "db size didn't increase after an append")
+ assert.Greater(t, expDbSize, dbSizeBeforeAppend, "db size didn't increase after an append")
q, err := dbWritable.Querier(context.TODO(), math.MinInt64, math.MaxInt64)
assert.NoError(t, err)
@@ -2559,7 +2559,7 @@ func TestChunkWriter_ReadAfterWrite(t *testing.T) {
for i, f := range files {
size := int(f.Size())
// Verify that the segment is the same or smaller than the expected size.
- assert.True(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i] >= size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size)
+ assert.GreaterOrEqual(t, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size, "Segment:%v should NOT be bigger than:%v actual:%v", i, chunks.SegmentHeaderSize+test.expSegmentSizes[i], size)
sizeAct += size
}
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index 42908f46f..4507973dc 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -296,7 +296,7 @@ func TestHead_WALMultiRef(t *testing.T) {
assert.NoError(t, app.Commit())
assert.Equal(t, 4.0, prom_testutil.ToFloat64(head.metrics.chunksCreated))
- assert.True(t, ref1 != ref2, "Refs are the same")
+ assert.NotEqual(t, ref1, ref2, "Refs are the same")
assert.NoError(t, head.Close())
w, err = wal.New(nil, nil, w.Dir(), false)
@@ -370,8 +370,8 @@ func TestHead_Truncate(t *testing.T) {
{minTime: 3000, maxTime: 3999},
}, h.series.getByID(s2.ref).mmappedChunks)
- assert.True(t, h.series.getByID(s3.ref) == nil, "")
- assert.True(t, h.series.getByID(s4.ref) == nil, "")
+ assert.Nil(t, h.series.getByID(s3.ref))
+ assert.Nil(t, h.series.getByID(s4.ref))
postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1"))
postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2"))
@@ -384,8 +384,8 @@ func TestHead_Truncate(t *testing.T) {
assert.Equal(t, []uint64{s2.ref}, postingsA2)
assert.Equal(t, []uint64{s1.ref, s2.ref}, postingsB1)
assert.Equal(t, []uint64{s1.ref, s2.ref}, postingsAll)
- assert.True(t, postingsB2 == nil, "")
- assert.True(t, postingsC1 == nil, "")
+ assert.Nil(t, postingsB2)
+ assert.Nil(t, postingsC1)
assert.Equal(t, map[string]struct{}{
"": {}, // from 'all' postings list
@@ -437,7 +437,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
for i := 0; i < 4000; i += 5 {
ok, _ := s.append(int64(i), float64(i), 0, chunkDiskMapper)
- assert.True(t, ok == true, "sample append failed")
+ assert.True(t, ok, "sample append failed")
}
// Check that truncate removes half of the chunks and afterwards
@@ -456,7 +456,7 @@ func TestMemSeries_truncateChunks(t *testing.T) {
assert.Equal(t, int64(2000), s.mmappedChunks[0].minTime)
_, _, err = s.chunk(0, chunkDiskMapper)
- assert.True(t, err == storage.ErrNotFound, "first chunks not gone")
+ assert.Equal(t, storage.ErrNotFound, err, "first chunks not gone")
assert.Equal(t, countBefore/2, len(s.mmappedChunks)+1) // +1 for the head chunk.
chk, _, err = s.chunk(lastID, chunkDiskMapper)
assert.NoError(t, err)
@@ -466,11 +466,11 @@ func TestMemSeries_truncateChunks(t *testing.T) {
// after truncation.
it1 := s.iterator(s.chunkID(len(s.mmappedChunks)), nil, chunkDiskMapper, nil)
_, ok := it1.(*memSafeIterator)
- assert.True(t, ok == true, "")
+ assert.True(t, ok)
it2 := s.iterator(s.chunkID(len(s.mmappedChunks)-1), nil, chunkDiskMapper, nil)
_, ok = it2.(*memSafeIterator)
- assert.True(t, ok == false, "non-last chunk incorrectly wrapped with sample buffer")
+ assert.False(t, ok, "non-last chunk incorrectly wrapped with sample buffer")
}
func TestHeadDeleteSeriesWithoutSamples(t *testing.T) {
@@ -656,7 +656,7 @@ func TestDeleteUntilCurMax(t *testing.T) {
assert.True(t, res.Next(), "series is not present")
s := res.At()
it := s.Iterator()
- assert.True(t, !it.Next(), "expected no samples")
+ assert.False(t, it.Next(), "expected no samples")
for res.Next() {
}
assert.NoError(t, res.Err())
@@ -980,7 +980,7 @@ func TestMemSeries_append(t *testing.T) {
ok, chunkCreated = s.append(999, 2, 0, chunkDiskMapper)
assert.True(t, ok, "append failed")
- assert.True(t, !chunkCreated, "second sample should use same chunk")
+ assert.False(t, chunkCreated, "second sample should use same chunk")
ok, chunkCreated = s.append(1000, 3, 0, chunkDiskMapper)
assert.True(t, ok, "append failed")
@@ -988,11 +988,13 @@ func TestMemSeries_append(t *testing.T) {
ok, chunkCreated = s.append(1001, 4, 0, chunkDiskMapper)
assert.True(t, ok, "append failed")
- assert.True(t, !chunkCreated, "second sample should use same chunk")
+ assert.False(t, chunkCreated, "second sample should use same chunk")
- assert.True(t, len(s.mmappedChunks) == 1, "there should be only 1 mmapped chunk")
- assert.True(t, s.mmappedChunks[0].minTime == 998 && s.mmappedChunks[0].maxTime == 999, "wrong chunk range")
- assert.True(t, s.headChunk.minTime == 1000 && s.headChunk.maxTime == 1001, "wrong chunk range")
+ assert.Equal(t, 1, len(s.mmappedChunks), "there should be only 1 mmapped chunk")
+ assert.Equal(t, int64(998), s.mmappedChunks[0].minTime, "wrong chunk range")
+ assert.Equal(t, int64(999), s.mmappedChunks[0].maxTime, "wrong chunk range")
+ assert.Equal(t, int64(1000), s.headChunk.minTime, "wrong chunk range")
+ assert.Equal(t, int64(1001), s.headChunk.maxTime, "wrong chunk range")
// Fill the range [1000,2000) with many samples. Intermediate chunks should be cut
// at approximately 120 samples per chunk.
@@ -1001,13 +1003,13 @@ func TestMemSeries_append(t *testing.T) {
assert.True(t, ok, "append failed")
}
- assert.True(t, len(s.mmappedChunks)+1 > 7, "expected intermediate chunks")
+ assert.Greater(t, len(s.mmappedChunks)+1, 7, "expected intermediate chunks")
// All chunks but the first and last should now be moderately full.
for i, c := range s.mmappedChunks[1:] {
chk, err := chunkDiskMapper.Chunk(c.ref)
assert.NoError(t, err)
- assert.True(t, chk.NumSamples() > 100, "unexpected small chunk %d of length %d", i, chk.NumSamples())
+ assert.Greater(t, chk.NumSamples(), 100, "unexpected small chunk %d of length %d", i, chk.NumSamples())
}
}
@@ -1028,7 +1030,7 @@ func TestGCChunkAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunks was not created")
ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed")
- assert.True(t, !chunkCreated, "chunks was created")
+ assert.False(t, chunkCreated, "chunks was created")
// A new chunks should be created here as it's beyond the chunk range.
ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper)
@@ -1036,7 +1038,7 @@ func TestGCChunkAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunks was not created")
ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed")
- assert.True(t, !chunkCreated, "chunks was created")
+ assert.False(t, chunkCreated, "chunks was created")
idx := h.indexRange(0, 1500)
var (
@@ -1082,7 +1084,7 @@ func TestGCSeriesAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunks was not created")
ok, chunkCreated = s.append(999, 999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed")
- assert.True(t, !chunkCreated, "chunks was created")
+ assert.False(t, chunkCreated, "chunks was created")
// A new chunks should be created here as it's beyond the chunk range.
ok, chunkCreated = s.append(1000, 1000, 0, h.chunkDiskMapper)
@@ -1090,7 +1092,7 @@ func TestGCSeriesAccess(t *testing.T) {
assert.True(t, chunkCreated, "chunks was not created")
ok, chunkCreated = s.append(1999, 1999, 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed")
- assert.True(t, !chunkCreated, "chunks was created")
+ assert.False(t, chunkCreated, "chunks was created")
idx := h.indexRange(0, 2000)
var (
@@ -1135,7 +1137,7 @@ func TestUncommittedSamplesNotLostOnTruncate(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, h.Truncate(2000))
- assert.True(t, nil != h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
+ assert.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
assert.NoError(t, app.Commit())
@@ -1165,7 +1167,7 @@ func TestRemoveSeriesAfterRollbackAndTruncate(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, h.Truncate(2000))
- assert.True(t, nil != h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
+ assert.NotNil(t, h.series.getByHash(lset.Hash(), lset), "series should not have been garbage collected")
assert.NoError(t, app.Rollback())
@@ -1332,7 +1334,7 @@ func TestHeadReadWriterRepair(t *testing.T) {
assert.True(t, chunkCreated, "chunk was not created")
ok, chunkCreated = s.append(int64(i*chunkRange)+chunkRange-1, float64(i*chunkRange), 0, h.chunkDiskMapper)
assert.True(t, ok, "series append failed")
- assert.True(t, !chunkCreated, "chunk was created")
+ assert.False(t, chunkCreated, "chunk was created")
assert.NoError(t, h.chunkDiskMapper.CutNewFile())
}
assert.NoError(t, h.Close())
@@ -1731,7 +1733,7 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
assert.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load())
assert.NoError(t, db.Compact())
- assert.True(t, db.head.minValidTime.Load() > 0, "")
+ assert.Greater(t, db.head.minValidTime.Load(), int64(0))
app = db.Appender(ctx)
_, err = app.Add(labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99)
diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go
index 3cf187d3a..9b7aceb14 100644
--- a/tsdb/index/index_test.go
+++ b/tsdb/index/index_test.go
@@ -437,7 +437,7 @@ func TestPersistence_index_e2e(t *testing.T) {
var chks, expchks []chunks.Meta
for gotp.Next() {
- assert.True(t, expp.Next() == true, "")
+ assert.True(t, expp.Next())
ref := gotp.At()
@@ -449,7 +449,7 @@ func TestPersistence_index_e2e(t *testing.T) {
assert.Equal(t, explset, lset)
assert.Equal(t, expchks, chks)
}
- assert.True(t, expp.Next() == false, "Expected no more postings for %q=%q", p.Name, p.Value)
+ assert.False(t, expp.Next(), "Expected no more postings for %q=%q", p.Name, p.Value)
assert.NoError(t, gotp.Err())
}
diff --git a/tsdb/index/postings_test.go b/tsdb/index/postings_test.go
index efcb4a255..f1c29b79c 100644
--- a/tsdb/index/postings_test.go
+++ b/tsdb/index/postings_test.go
@@ -557,7 +557,7 @@ func TestRemovedNextStackoverflow(t *testing.T) {
}
assert.NoError(t, rp.Err())
- assert.True(t, !gotElem, "")
+ assert.False(t, gotElem)
}
func TestRemovedPostingsSeek(t *testing.T) {
@@ -664,12 +664,12 @@ func TestBigEndian(t *testing.T) {
t.Run("Iteration", func(t *testing.T) {
bep := newBigEndianPostings(beLst)
for i := 0; i < num; i++ {
- assert.True(t, bep.Next() == true, "")
+ assert.True(t, bep.Next())
assert.Equal(t, uint64(ls[i]), bep.At())
}
- assert.True(t, bep.Next() == false, "")
- assert.True(t, bep.Err() == nil, "")
+ assert.False(t, bep.Next())
+ assert.NoError(t, bep.Err())
})
t.Run("Seek", func(t *testing.T) {
@@ -715,7 +715,7 @@ func TestBigEndian(t *testing.T) {
for _, v := range table {
assert.Equal(t, v.found, bep.Seek(uint64(v.seek)))
assert.Equal(t, uint64(v.val), bep.At())
- assert.True(t, bep.Err() == nil, "")
+ assert.NoError(t, bep.Err())
}
})
}
@@ -872,5 +872,5 @@ func TestMemPostings_Delete(t *testing.T) {
deleted := p.Get("lbl1", "b")
expanded, err = ExpandPostings(deleted)
assert.NoError(t, err)
- assert.True(t, 0 == len(expanded), "expected empty postings, got %v", expanded)
+ assert.Equal(t, 0, len(expanded), "expected empty postings, got %v", expanded)
}
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index 71d8ec317..0280138e7 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -860,9 +860,9 @@ func TestPopulateWithDelSeriesIterator_DoubleSeek(t *testing.T) {
)
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
- assert.True(t, it.Seek(1), "")
- assert.True(t, it.Seek(2), "")
- assert.True(t, it.Seek(2), "")
+ assert.True(t, it.Seek(1))
+ assert.True(t, it.Seek(2))
+ assert.True(t, it.Seek(2))
ts, v := it.At()
assert.Equal(t, int64(2), ts)
assert.Equal(t, float64(2), v)
@@ -878,12 +878,12 @@ func TestPopulateWithDelSeriesIterator_SeekInCurrentChunk(t *testing.T) {
)
it := newPopulateWithDelGenericSeriesIterator(f, chkMetas, nil).toSeriesIterator()
- assert.True(t, it.Next(), "")
+ assert.True(t, it.Next())
ts, v := it.At()
assert.Equal(t, int64(1), ts)
assert.Equal(t, float64(2), v)
- assert.True(t, it.Seek(4), "")
+ assert.True(t, it.Seek(4))
ts, v = it.At()
assert.Equal(t, int64(5), ts)
assert.Equal(t, float64(6), v)
@@ -1018,7 +1018,7 @@ func TestDeletedIterator(t *testing.T) {
}
}
- assert.True(t, i < 1000, "")
+ assert.Less(t, i, int64(1000))
ts, v := it.At()
assert.Equal(t, act[i].t, ts)
@@ -1033,7 +1033,7 @@ func TestDeletedIterator(t *testing.T) {
}
}
- assert.True(t, i >= 1000, "")
+ assert.GreaterOrEqual(t, i, int64(1000))
assert.NoError(t, it.Err())
}
}
diff --git a/tsdb/repair_test.go b/tsdb/repair_test.go
index d73fe5007..d5cdb8939 100644
--- a/tsdb/repair_test.go
+++ b/tsdb/repair_test.go
@@ -123,5 +123,5 @@ func TestRepairBadIndexVersion(t *testing.T) {
meta, _, err := readMetaFile(tmpDbDir)
assert.NoError(t, err)
- assert.True(t, meta.Version == metaVersion1, "unexpected meta version %d", meta.Version)
+ assert.Equal(t, metaVersion1, meta.Version, "unexpected meta version %d", meta.Version)
}
diff --git a/tsdb/tsdbutil/buffer_test.go b/tsdb/tsdbutil/buffer_test.go
index 3c916e012..f54757089 100644
--- a/tsdb/tsdbutil/buffer_test.go
+++ b/tsdb/tsdbutil/buffer_test.go
@@ -111,29 +111,29 @@ func TestBufferedSeriesIterator(t *testing.T) {
{t: 101, v: 10},
}), 2)
- assert.True(t, it.Seek(-123) == true, "seek failed")
+ assert.True(t, it.Seek(-123), "seek failed")
sampleEq(1, 2)
bufferEq(nil)
- assert.True(t, it.Next() == true, "next failed")
+ assert.True(t, it.Next(), "next failed")
sampleEq(2, 3)
bufferEq([]sample{{t: 1, v: 2}})
- assert.True(t, it.Next() == true, "next failed")
- assert.True(t, it.Next() == true, "next failed")
- assert.True(t, it.Next() == true, "next failed")
+ assert.True(t, it.Next(), "next failed")
+ assert.True(t, it.Next(), "next failed")
+ assert.True(t, it.Next(), "next failed")
sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
- assert.True(t, it.Seek(5) == true, "seek failed")
+ assert.True(t, it.Seek(5), "seek failed")
sampleEq(5, 6)
bufferEq([]sample{{t: 2, v: 3}, {t: 3, v: 4}, {t: 4, v: 5}})
- assert.True(t, it.Seek(101) == true, "seek failed")
+ assert.True(t, it.Seek(101), "seek failed")
sampleEq(101, 10)
bufferEq([]sample{{t: 99, v: 8}, {t: 100, v: 9}})
- assert.True(t, it.Next() == false, "next succeeded unexpectedly")
+ assert.False(t, it.Next(), "next succeeded unexpectedly")
}
type listSeriesIterator struct {
diff --git a/tsdb/wal/checkpoint_test.go b/tsdb/wal/checkpoint_test.go
index 21c47a102..7b3692392 100644
--- a/tsdb/wal/checkpoint_test.go
+++ b/tsdb/wal/checkpoint_test.go
@@ -213,7 +213,7 @@ func TestCheckpoint(t *testing.T) {
samples, err := dec.Samples(rec, nil)
assert.NoError(t, err)
for _, s := range samples {
- assert.True(t, s.T >= last/2, "sample with wrong timestamp")
+ assert.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
}
}
}
diff --git a/tsdb/wal/reader_test.go b/tsdb/wal/reader_test.go
index 3f8d4eae7..340d9a3be 100644
--- a/tsdb/wal/reader_test.go
+++ b/tsdb/wal/reader_test.go
@@ -221,7 +221,7 @@ func TestReader_Live(t *testing.T) {
reader := NewLiveReader(logger, NewLiveReaderMetrics(nil), readFd)
for _, exp := range testReaderCases[i].exp {
for !reader.Next() {
- assert.True(t, reader.Err() == io.EOF, "expect EOF, got: %v", reader.Err())
+ assert.Equal(t, io.EOF, reader.Err(), "expect EOF, got: %v", reader.Err())
runtime.Gosched()
}
@@ -229,7 +229,7 @@ func TestReader_Live(t *testing.T) {
assert.Equal(t, exp, actual, "read wrong record")
}
- assert.True(t, !reader.Next(), "unexpected record")
+ assert.False(t, reader.Next(), "unexpected record")
if testReaderCases[i].fail {
assert.Error(t, reader.Err())
}
@@ -341,7 +341,7 @@ func TestReaderFuzz(t *testing.T) {
assert.True(t, reader.Next(), "expected record: %v", reader.Err())
assert.Equal(t, expected, reader.Record(), "read wrong record")
}
- assert.True(t, !reader.Next(), "unexpected record")
+ assert.False(t, reader.Next(), "unexpected record")
})
}
}
@@ -391,7 +391,7 @@ func TestReaderFuzz_Live(t *testing.T) {
assert.True(t, ok, "unexpected record")
assert.Equal(t, expected, rec, "record does not match expected")
}
- assert.True(t, r.Err() == io.EOF, "expected EOF, got: %v", r.Err())
+ assert.Equal(t, io.EOF, r.Err(), "expected EOF, got: %v", r.Err())
return true
}
@@ -411,7 +411,7 @@ func TestReaderFuzz_Live(t *testing.T) {
fi, err := os.Stat(SegmentName(dir, seg.i))
assert.NoError(t, err)
- assert.True(t, r.Offset() == fi.Size(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size())
+ assert.Equal(t, fi.Size(), r.Offset(), "expected to have read whole segment, but read %d of %d", r.Offset(), fi.Size())
seg, err = OpenReadSegment(SegmentName(dir, seg.i+1))
assert.NoError(t, err)
@@ -427,7 +427,7 @@ func TestReaderFuzz_Live(t *testing.T) {
}
}
- assert.True(t, r.Err() == io.EOF, "expected EOF")
+ assert.Equal(t, io.EOF, r.Err(), "expected EOF")
})
}
}
@@ -473,8 +473,8 @@ func TestLiveReaderCorrupt_ShortFile(t *testing.T) {
defer seg.Close()
r := NewLiveReader(logger, nil, seg)
- assert.True(t, r.Next() == false, "expected no records")
- assert.True(t, r.Err() == io.EOF, "expected error, got: %v", r.Err())
+ assert.False(t, r.Next(), "expected no records")
+ assert.Equal(t, io.EOF, r.Err(), "expected error, got: %v", r.Err())
}
func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) {
@@ -521,8 +521,8 @@ func TestLiveReaderCorrupt_RecordTooLongAndShort(t *testing.T) {
defer seg.Close()
r := NewLiveReader(logger, NewLiveReaderMetrics(nil), seg)
- assert.True(t, r.Next() == false, "expected no records")
- assert.True(t, r.Err().Error() == "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err())
+ assert.False(t, r.Next(), "expected no records")
+ assert.EqualError(t, r.Err(), "record length greater than a single page: 65542 > 32768", "expected error, got: %v", r.Err())
}
func TestReaderData(t *testing.T) {
diff --git a/tsdb/wal/wal_test.go b/tsdb/wal/wal_test.go
index c6d640812..025b2f72c 100644
--- a/tsdb/wal/wal_test.go
+++ b/tsdb/wal/wal_test.go
@@ -294,7 +294,7 @@ func TestCorruptAndCarryOn(t *testing.T) {
assert.Equal(t, recordSize, len(reader.Record()))
}
assert.Equal(t, 4, i, "not enough records")
- assert.True(t, !reader.Next(), "unexpected record")
+ assert.False(t, reader.Next(), "unexpected record")
corruptionErr := reader.Err()
assert.Error(t, corruptionErr)
@@ -336,7 +336,7 @@ func TestCorruptAndCarryOn(t *testing.T) {
assert.Equal(t, recordSize, len(reader.Record()))
}
assert.Equal(t, 9, i, "wrong number of records")
- assert.True(t, !reader.Next(), "unexpected record")
+ assert.False(t, reader.Next(), "unexpected record")
assert.Equal(t, nil, reader.Err())
sr.Close()
}
@@ -380,7 +380,7 @@ func TestSegmentMetric(t *testing.T) {
err = w.Log(buf)
assert.NoError(t, err)
}
- assert.True(t, client_testutil.ToFloat64(w.metrics.currentSegment) == initialSegment+1, "segment metric did not increment after segment rotation")
+ assert.Equal(t, initialSegment+1, client_testutil.ToFloat64(w.metrics.currentSegment), "segment metric did not increment after segment rotation")
assert.NoError(t, w.Close())
}
@@ -421,7 +421,7 @@ func TestCompression(t *testing.T) {
compressedSize, err := fileutil.DirSize(dirCompressed)
assert.NoError(t, err)
- assert.True(t, float64(uncompressedSize)*0.75 > float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize)
+ assert.Greater(t, float64(uncompressedSize)*0.75, float64(compressedSize), "Compressing zeroes should save at least 25%% space - uncompressedSize: %d, compressedSize: %d", uncompressedSize, compressedSize)
}
func BenchmarkWAL_LogBatched(b *testing.B) {
diff --git a/web/web_test.go b/web/web_test.go
index 6f559382e..964ac3992 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -478,7 +478,7 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
b, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.NoError(t, json.Unmarshal(b, snapshot))
- assert.True(t, snapshot.Data.Name != "", "snapshot directory not returned")
+ assert.NotZero(t, snapshot.Data.Name, "snapshot directory not returned")
assert.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
assert.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
}