enable errorf rule from perfsprint linter

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Branch: pull/12826/head
Matthieu MOREL, 3 weeks ago, committed by Ayoub Mrini
Commit: af1a19fc78
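
perfsprint's errorf rule flags fmt.Errorf calls whose message contains no formatting directives and rewrites them to errors.New, which builds the same error value without a pass through the format-string parser. A minimal sketch of the rewrite this commit applies throughout (the sentinel below mirrors one from cmd/promtool/analyze.go; the rest is illustrative):

	package main

	import (
		"errors"
		"fmt"
	)

	// Before: a constant message still goes through format parsing.
	var errNotNativeHistogramOld = fmt.Errorf("not a native histogram")

	// After: errors.New yields an equivalent error without that overhead.
	var errNotNativeHistogram = errors.New("not a native histogram")

	func main() {
		fmt.Println(errNotNativeHistogramOld, errNotNativeHistogram)
	}
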
64 changed files (changed lines in parentheses):

  1. .golangci.yml (2)
  2. cmd/promtool/analyze.go (8)
  3. cmd/promtool/backfill.go (2)
  4. cmd/promtool/main.go (2)
  5. cmd/promtool/tsdb.go (4)
  6. config/config.go (4)
  7. discovery/aws/ec2.go (2)
  8. discovery/aws/lightsail.go (2)
  9. discovery/azure/azure.go (2)
 10. discovery/consul/consul.go (2)
 11. discovery/digitalocean/digitalocean.go (3)
 12. discovery/dns/dns.go (2)
 13. discovery/dns/dns_test.go (4)
 14. discovery/eureka/eureka.go (3)
 15. discovery/file/file.go (2)
 16. discovery/gce/gce.go (2)
 17. discovery/hetzner/hetzner.go (2)
 18. discovery/http/http.go (8)
 19. discovery/ionos/ionos.go (3)
 20. discovery/kubernetes/kubernetes.go (22)
 21. discovery/linode/linode.go (2)
 22. discovery/manager_test.go (7)
 23. discovery/marathon/marathon.go (2)
 24. discovery/moby/docker.go (5)
 25. discovery/moby/dockerswarm.go (7)
 26. discovery/nomad/nomad.go (2)
 27. discovery/openstack/openstack.go (2)
 28. discovery/ovhcloud/ovhcloud.go (2)
 29. discovery/puppetdb/puppetdb.go (11)
 30. discovery/refresh/refresh_test.go (4)
 31. discovery/registry.go (4)
 32. discovery/scaleway/scaleway.go (2)
 33. discovery/triton/triton.go (2)
 34. discovery/uyuni/uyuni.go (2)
 35. discovery/vultr/vultr.go (3)
 36. discovery/xds/kuma.go (3)
 37. model/histogram/float_histogram.go (15)
 38. model/histogram/histogram.go (11)
 39. model/relabel/relabel.go (5)
 40. model/rulefmt/rulefmt.go (12)
 41. model/textparse/promparse.go (2)
 42. promql/engine.go (6)
 43. promql/info.go (2)
 44. promql/promqltest/test.go (2)
 45. rules/alerting.go (3)
 46. rules/recording.go (3)
 47. scrape/manager_test.go (5)
 48. scrape/scrape_test.go (6)
 49. storage/interface.go (10)
 50. storage/remote/azuread/azuread.go (27)
 51. storage/remote/queue_manager_test.go (2)
 52. storage/remote/write_handler_test.go (4)
 53. tsdb/chunkenc/chunk_test.go (2)
 54. tsdb/chunkenc/float_histogram.go (7)
 55. tsdb/chunkenc/histogram.go (7)
 56. tsdb/chunks/chunks.go (7)
 57. tsdb/compact.go (2)
 58. tsdb/db.go (4)
 59. tsdb/db_test.go (5)
 60. tsdb/ooo_head_read_test.go (2)
 61. tsdb/querier_test.go (2)
 62. tsdb/tsdbblockutil.go (3)
 63. web/api/v1/api.go (4)
 64. web/api/v1/api_test.go (12)

.golangci.yml (2 changed lines)

@@ -109,7 +109,7 @@ linters-settings:
     extra-rules: true
   perfsprint:
     # Optimizes `fmt.Errorf`.
-    errorf: false
+    errorf: true
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly enable all required rules here.

cmd/promtool/analyze.go (8 changed lines)

@@ -34,8 +34,8 @@ import (
 )
 var (
-	errNotNativeHistogram = fmt.Errorf("not a native histogram")
-	errNotEnoughData      = fmt.Errorf("not enough data")
+	errNotNativeHistogram = errors.New("not a native histogram")
+	errNotEnoughData      = errors.New("not enough data")
 	outputHeader = `Bucket stats for each histogram series over time
 ------------------------------------------------
@@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
 	matrix, ok := values.(model.Matrix)
 	if !ok {
-		return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
+		return nil, errors.New("query of buckets resulted in non-Matrix")
 	}
 	return matrix, nil
@@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
 		prev := matrix[i].Values[timeIdx]
 		// Assume the results are nicely aligned.
 		if curr.Timestamp != prev.Timestamp {
-			return counts, fmt.Errorf("matrix result is not time aligned")
+			return counts, errors.New("matrix result is not time aligned")
 		}
 		counts[i+1] = int(curr.Value - prev.Value)
 	}

cmd/promtool/backfill.go (2 changed lines)

@@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
 		_, ts, _ := p.Series()
 		if ts == nil {
-			return 0, 0, fmt.Errorf("expected timestamp for series got none")
+			return 0, 0, errors.New("expected timestamp for series got none")
 		}
 		if *ts > maxt {

cmd/promtool/main.go (2 changed lines)

@@ -444,7 +444,7 @@ func checkExperimental(f bool) {
 	}
 }
-var errLint = fmt.Errorf("lint error")
+var errLint = errors.New("lint error")
 type lintConfig struct {
 	all bool

cmd/promtool/tsdb.go (4 changed lines)

@@ -662,7 +662,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 			histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
 			fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
 			if !ok {
-				return fmt.Errorf("chunk is not FloatHistogramChunk")
+				return errors.New("chunk is not FloatHistogramChunk")
 			}
 			it := fhchk.Iterator(nil)
 			bucketCount := 0
@@ -677,7 +677,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 			histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
 			hchk, ok := chk.(*chunkenc.HistogramChunk)
 			if !ok {
-				return fmt.Errorf("chunk is not HistogramChunk")
+				return errors.New("chunk is not HistogramChunk")
 			}
 			it := hchk.Iterator(nil)
 			bucketCount := 0

config/config.go (4 changed lines)

@@ -1072,7 +1072,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
 	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+		return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
 	}
 	// Check for users putting URLs in target groups.
@@ -1420,7 +1420,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	for i, attr := range c.PromoteResourceAttributes {
 		attr = strings.TrimSpace(attr)
 		if attr == "" {
-			err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+			err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
 			continue
 		}
 		if _, exists := seen[attr]; exists {

discovery/aws/ec2.go (2 changed lines)

@@ -161,7 +161,7 @@ type EC2Discovery struct {
 func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
 	m, ok := metrics.(*ec2Metrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/aws/lightsail.go (2 changed lines)

@@ -134,7 +134,7 @@ type LightsailDiscovery struct {
 func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
 	m, ok := metrics.(*lightsailMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/azure/azure.go (2 changed lines)

@@ -186,7 +186,7 @@ type Discovery struct {
 func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*azureMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/consul/consul.go (2 changed lines)

@@ -189,7 +189,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*consulMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/digitalocean/digitalocean.go (3 changed lines)

@@ -15,6 +15,7 @@ package digitalocean
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log/slog"
 	"net"
@@ -114,7 +115,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*digitaloceanMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &Discovery{

discovery/dns/dns.go (2 changed lines)

@@ -121,7 +121,7 @@ type Discovery struct {
 func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*dnsMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/dns/dns_test.go (4 changed lines)

@@ -15,7 +15,7 @@ package dns
 import (
 	"context"
-	"fmt"
+	"errors"
 	"log/slog"
 	"net"
 	"testing"
@@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
 				Type: "A",
 			},
 			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
-				return nil, fmt.Errorf("some error")
+				return nil, errors.New("some error")
 			},
 			expected: []*targetgroup.Group{},
 		},

discovery/eureka/eureka.go (3 changed lines)

@@ -16,7 +16,6 @@ package eureka
 import (
 	"context"
 	"errors"
-	"fmt"
 	"log/slog"
 	"net"
 	"net/http"
@@ -129,7 +128,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*eurekaMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")

discovery/file/file.go (2 changed lines)

@@ -184,7 +184,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	fm, ok := metrics.(*fileMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/gce/gce.go (2 changed lines)

@@ -132,7 +132,7 @@ type Discovery struct {
 func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*gceMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &Discovery{

discovery/hetzner/hetzner.go (2 changed lines)

@@ -138,7 +138,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*hetznerMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	r, err := newRefresher(conf, logger)

discovery/http/http.go (8 changed lines)

@@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 	if c.URL == "" {
-		return fmt.Errorf("URL is missing")
+		return errors.New("URL is missing")
 	}
 	parsedURL, err := url.Parse(c.URL)
 	if err != nil {
 		return err
 	}
 	if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
-		return fmt.Errorf("URL scheme must be 'http' or 'https'")
+		return errors.New("URL scheme must be 'http' or 'https'")
 	}
 	if parsedURL.Host == "" {
-		return fmt.Errorf("host is missing in URL")
+		return errors.New("host is missing in URL")
 	}
 	return c.HTTPClientConfig.Validate()
 }
@@ -118,7 +118,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*httpMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/ionos/ionos.go (3 changed lines)

@@ -15,7 +15,6 @@ package ionos
 import (
 	"errors"
-	"fmt"
 	"log/slog"
 	"time"
@@ -46,7 +45,7 @@ type Discovery struct{}
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*ionosMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if conf.ionosEndpoint == "" {

discovery/kubernetes/kubernetes.go (22 changed lines)

@@ -173,7 +173,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 	if c.Role == "" {
-		return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
+		return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
 	}
 	err = c.HTTPClientConfig.Validate()
 	if err != nil {
@@ -181,20 +181,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	}
 	if c.APIServer.URL != nil && c.KubeConfig != "" {
 		// Api-server and kubeconfig_file are mutually exclusive
-		return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
+		return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
 	}
 	if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
 		// Kubeconfig_file and custom http config are mutually exclusive
-		return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
+		return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
 	}
 	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
-		return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
+		return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
 	}
 	if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
-		return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
+		return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
 	}
 	if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
-		return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
+		return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
 	}
 	foundSelectorRoles := make(map[Role]struct{})
@@ -288,7 +288,7 @@ func (d *Discovery) getNamespaces() []string {
 func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
 	m, ok := metrics.(*kubernetesMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if l == nil {
@@ -672,7 +672,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
 		pod, ok := obj.(*apiv1.Pod)
 		if !ok {
-			return nil, fmt.Errorf("object is not a pod")
+			return nil, errors.New("object is not a pod")
 		}
 		return []string{pod.Spec.NodeName}, nil
 	}
@@ -686,7 +686,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
 	indexers[podIndex] = func(obj interface{}) ([]string, error) {
 		e, ok := obj.(*apiv1.Endpoints)
 		if !ok {
-			return nil, fmt.Errorf("object is not endpoints")
+			return nil, errors.New("object is not endpoints")
 		}
 		var pods []string
 		for _, target := range e.Subsets {
@@ -705,7 +705,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
 		e, ok := obj.(*apiv1.Endpoints)
 		if !ok {
-			return nil, fmt.Errorf("object is not endpoints")
+			return nil, errors.New("object is not endpoints")
 		}
 		var nodes []string
 		for _, target := range e.Subsets {
@@ -751,7 +751,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
 			}
 		}
 	default:
-		return nil, fmt.Errorf("object is not an endpointslice")
+		return nil, errors.New("object is not an endpointslice")
 	}
 	return nodes, nil

discovery/linode/linode.go (2 changed lines)

@@ -141,7 +141,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*linodeMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &Discovery{

discovery/manager_test.go (7 changed lines)

@@ -15,6 +15,7 @@ package discovery
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sort"
 	"strconv"
@@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	c := map[string]Configs{
 		"prometheus": {
-			errorConfig{fmt.Errorf("tests error 0")},
-			errorConfig{fmt.Errorf("tests error 1")},
-			errorConfig{fmt.Errorf("tests error 2")},
+			errorConfig{errors.New("tests error 0")},
+			errorConfig{errors.New("tests error 1")},
+			errorConfig{errors.New("tests error 2")},
 		},
 	}
 	discoveryManager.ApplyConfig(c)

discovery/marathon/marathon.go (2 changed lines)

@@ -143,7 +143,7 @@ type Discovery struct {
 func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*marathonMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")

discovery/moby/docker.go (5 changed lines)

@@ -15,6 +15,7 @@ package moby
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log/slog"
 	"net"
@@ -110,7 +111,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
 		return err
 	}
 	if c.Host == "" {
-		return fmt.Errorf("host missing")
+		return errors.New("host missing")
 	}
 	if _, err = url.Parse(c.Host); err != nil {
 		return err
@@ -131,7 +132,7 @@ type DockerDiscovery struct {
 func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
 	m, ok := metrics.(*dockerMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &DockerDiscovery{

discovery/moby/dockerswarm.go (7 changed lines)

@@ -15,6 +15,7 @@ package moby
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log/slog"
 	"net/http"
@@ -99,7 +100,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
 		return err
 	}
 	if c.Host == "" {
-		return fmt.Errorf("host missing")
+		return errors.New("host missing")
 	}
 	if _, err = url.Parse(c.Host); err != nil {
 		return err
@@ -107,7 +108,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
 	switch c.Role {
 	case "services", "nodes", "tasks":
 	case "":
-		return fmt.Errorf("role missing (one of: tasks, services, nodes)")
+		return errors.New("role missing (one of: tasks, services, nodes)")
 	default:
 		return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
 	}
@@ -128,7 +129,7 @@ type Discovery struct {
 func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*dockerswarmMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &Discovery{

discovery/nomad/nomad.go (2 changed lines)

@@ -124,7 +124,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*nomadMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &Discovery{

discovery/openstack/openstack.go (2 changed lines)

@@ -145,7 +145,7 @@ type refresher interface {
 func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*openstackMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	r, err := newRefresher(conf, l)

discovery/ovhcloud/ovhcloud.go (2 changed lines)

@@ -151,7 +151,7 @@ func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*ovhcloudMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	r, err := newRefresher(conf, logger)

discovery/puppetdb/puppetdb.go (11 changed lines)

@@ -17,6 +17,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"log/slog"
@@ -109,20 +110,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 	if c.URL == "" {
-		return fmt.Errorf("URL is missing")
+		return errors.New("URL is missing")
 	}
 	parsedURL, err := url.Parse(c.URL)
 	if err != nil {
 		return err
 	}
 	if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
-		return fmt.Errorf("URL scheme must be 'http' or 'https'")
+		return errors.New("URL scheme must be 'http' or 'https'")
 	}
 	if parsedURL.Host == "" {
-		return fmt.Errorf("host is missing in URL")
+		return errors.New("host is missing in URL")
 	}
 	if c.Query == "" {
-		return fmt.Errorf("query missing")
+		return errors.New("query missing")
 	}
 	return c.HTTPClientConfig.Validate()
 }
@@ -142,7 +143,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*puppetdbMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	if logger == nil {

discovery/refresh/refresh_test.go (4 changed lines)

@@ -15,7 +15,7 @@ package refresh
 import (
 	"context"
-	"fmt"
+	"errors"
 	"testing"
 	"time"
@@ -64,7 +64,7 @@ func TestRefresh(t *testing.T) {
 		case 2:
 			return tg2, nil
 		}
-		return nil, fmt.Errorf("some error")
+		return nil, errors.New("some error")
 	}
 	interval := time.Millisecond

discovery/registry.go (4 changed lines)

@@ -267,7 +267,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
 func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) {
 	err := rmm.Register()
 	if err != nil {
-		return nil, fmt.Errorf("failed to create service discovery refresh metrics")
+		return nil, errors.New("failed to create service discovery refresh metrics")
 	}
 	metrics := make(map[string]DiscovererMetrics)
@@ -275,7 +275,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag
 		currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm)
 		err = currentSdMetrics.Register()
 		if err != nil {
-			return nil, fmt.Errorf("failed to create service discovery metrics")
+			return nil, errors.New("failed to create service discovery metrics")
 		}
 		metrics[conf.Name()] = currentSdMetrics
 	}

discovery/scaleway/scaleway.go (2 changed lines)

@@ -188,7 +188,7 @@ type Discovery struct{}
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*scalewayMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	r, err := newRefresher(conf)

discovery/triton/triton.go (2 changed lines)

@@ -149,7 +149,7 @@ type Discovery struct {
 func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*tritonMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	tls, err := config.NewTLSConfig(&conf.TLSConfig)

discovery/uyuni/uyuni.go (2 changed lines)

@@ -215,7 +215,7 @@ func getEndpointInfoForSystems(
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*uyuniMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	apiURL, err := url.Parse(conf.Server)

discovery/vultr/vultr.go (3 changed lines)

@@ -15,6 +15,7 @@ package vultr
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log/slog"
 	"net"
@@ -117,7 +118,7 @@ type Discovery struct {
 func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*vultrMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	d := &Discovery{

discovery/xds/kuma.go (3 changed lines)

@@ -14,6 +14,7 @@
 package xds
 import (
+	"errors"
 	"fmt"
 	"log/slog"
 	"net/url"
@@ -161,7 +162,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
 func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
 	m, ok := metrics.(*xdsMetrics)
 	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
 	}
 	// Default to "prometheus" if hostname is unavailable.

model/histogram/float_histogram.go (15 changed lines)

@@ -14,6 +14,7 @@
 package histogram
 import (
+	"errors"
 	"fmt"
 	"math"
 	"strings"
@@ -784,16 +785,16 @@ func (h *FloatHistogram) Validate() error {
 			return fmt.Errorf("custom buckets: %w", err)
 		}
 		if h.ZeroCount != 0 {
-			return fmt.Errorf("custom buckets: must have zero count of 0")
+			return errors.New("custom buckets: must have zero count of 0")
 		}
 		if h.ZeroThreshold != 0 {
-			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+			return errors.New("custom buckets: must have zero threshold of 0")
 		}
 		if len(h.NegativeSpans) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative spans")
+			return errors.New("custom buckets: must not have negative spans")
 		}
 		if len(h.NegativeBuckets) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative buckets")
+			return errors.New("custom buckets: must not have negative buckets")
 		}
 	} else {
 		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@@ -807,7 +808,7 @@ func (h *FloatHistogram) Validate() error {
 			return fmt.Errorf("negative side: %w", err)
 		}
 		if h.CustomValues != nil {
-			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+			return errors.New("histogram with exponential schema must not have custom bounds")
 		}
 	}
 	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
@@ -948,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator(
 	positive bool, absoluteStartValue float64, targetSchema int32,
 ) floatBucketIterator {
 	if h.UsesCustomBuckets() && targetSchema != h.Schema {
-		panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
+		panic(errors.New("cannot merge from custom buckets schema to exponential schema"))
 	}
 	if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
-		panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
+		panic(errors.New("cannot merge from exponential buckets schema to custom schema"))
 	}
 	if targetSchema > h.Schema {
 		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
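
Note the boundary the rule respects in the hunk above: the constant-message panics become errors.New, while the final panic keeps fmt.Errorf because it interpolates the two schemas. A hedged sketch of that distinction (package and function names are illustrative, not from the diff):

	package histogramdemo

	import (
		"errors"
		"fmt"
	)

	// Constant message: perfsprint rewrites this form to errors.New.
	var errCustomToExp = errors.New("cannot merge from custom buckets schema to exponential schema")

	// Message with %d verbs: left as fmt.Errorf, since errors.New
	// cannot interpolate arguments.
	func mergeErr(from, to int32) error {
		return fmt.Errorf("cannot merge from schema %d to %d", from, to)
	}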

model/histogram/histogram.go (11 changed lines)

@@ -14,6 +14,7 @@
 package histogram
 import (
+	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -432,16 +433,16 @@ func (h *Histogram) Validate() error {
 			return fmt.Errorf("custom buckets: %w", err)
 		}
 		if h.ZeroCount != 0 {
-			return fmt.Errorf("custom buckets: must have zero count of 0")
+			return errors.New("custom buckets: must have zero count of 0")
 		}
 		if h.ZeroThreshold != 0 {
-			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+			return errors.New("custom buckets: must have zero threshold of 0")
 		}
 		if len(h.NegativeSpans) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative spans")
+			return errors.New("custom buckets: must not have negative spans")
 		}
 		if len(h.NegativeBuckets) > 0 {
-			return fmt.Errorf("custom buckets: must not have negative buckets")
+			return errors.New("custom buckets: must not have negative buckets")
 		}
 	} else {
 		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
@@ -455,7 +456,7 @@ func (h *Histogram) Validate() error {
 			return fmt.Errorf("negative side: %w", err)
 		}
 		if h.CustomValues != nil {
-			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+			return errors.New("histogram with exponential schema must not have custom bounds")
 		}
 	}
 	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)

model/relabel/relabel.go (5 changed lines)

@@ -16,6 +16,7 @@ package relabel
 import (
 	"crypto/md5"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -114,10 +115,10 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 func (c *Config) Validate() error {
 	if c.Action == "" {
-		return fmt.Errorf("relabel action cannot be empty")
+		return errors.New("relabel action cannot be empty")
 	}
 	if c.Modulus == 0 && c.Action == HashMod {
-		return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
+		return errors.New("relabel configuration for hashmod requires non-zero modulus")
 	}
 	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
 		return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)

model/rulefmt/rulefmt.go (12 changed lines)

@@ -184,14 +184,14 @@ type RuleNode struct {
 func (r *RuleNode) Validate() (nodes []WrappedError) {
 	if r.Record.Value != "" && r.Alert.Value != "" {
 		nodes = append(nodes, WrappedError{
-			err:     fmt.Errorf("only one of 'record' and 'alert' must be set"),
+			err:     errors.New("only one of 'record' and 'alert' must be set"),
 			node:    &r.Record,
 			nodeAlt: &r.Alert,
 		})
 	}
 	if r.Record.Value == "" && r.Alert.Value == "" {
 		nodes = append(nodes, WrappedError{
-			err:     fmt.Errorf("one of 'record' or 'alert' must be set"),
+			err:     errors.New("one of 'record' or 'alert' must be set"),
 			node:    &r.Record,
 			nodeAlt: &r.Alert,
 		})
@@ -199,7 +199,7 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
 	if r.Expr.Value == "" {
 		nodes = append(nodes, WrappedError{
-			err:  fmt.Errorf("field 'expr' must be set in rule"),
+			err:  errors.New("field 'expr' must be set in rule"),
 			node: &r.Expr,
 		})
 	} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
@@ -211,19 +211,19 @@ func (r *RuleNode) Validate() (nodes []WrappedError) {
 	if r.Record.Value != "" {
 		if len(r.Annotations) > 0 {
 			nodes = append(nodes, WrappedError{
-				err:  fmt.Errorf("invalid field 'annotations' in recording rule"),
+				err:  errors.New("invalid field 'annotations' in recording rule"),
 				node: &r.Record,
 			})
 		}
 		if r.For != 0 {
 			nodes = append(nodes, WrappedError{
-				err:  fmt.Errorf("invalid field 'for' in recording rule"),
+				err:  errors.New("invalid field 'for' in recording rule"),
 				node: &r.Record,
 			})
 		}
 		if r.KeepFiringFor != 0 {
 			nodes = append(nodes, WrappedError{
-				err:  fmt.Errorf("invalid field 'keep_firing_for' in recording rule"),
+				err:  errors.New("invalid field 'keep_firing_for' in recording rule"),
 				node: &r.Record,
 			})
 		}

model/textparse/promparse.go (2 changed lines)

@@ -509,7 +509,7 @@ func yoloString(b []byte) string {
 func parseFloat(s string) (float64, error) {
 	// Keep to pre-Go 1.13 float formats.
 	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
+		return 0, errors.New("unsupported character in float")
 	}
 	return strconv.ParseFloat(s, 64)
 }

promql/engine.go (6 changed lines)

@@ -2047,7 +2047,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
 		}
 		for i := range mat {
 			if len(mat[i].Floats)+len(mat[i].Histograms) != 1 {
-				panic(fmt.Errorf("unexpected number of samples"))
+				panic(errors.New("unexpected number of samples"))
 			}
 			for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts += ev.interval {
 				if len(mat[i].Floats) > 0 {
@@ -3626,7 +3626,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
 		if n, ok := node.(*parser.BinaryExpr); ok {
 			detectHistogramStatsDecoding(n.LHS)
 			detectHistogramStatsDecoding(n.RHS)
-			return fmt.Errorf("stop")
+			return errors.New("stop")
 		}
 		n, ok := (node).(*parser.VectorSelector)
@@ -3648,7 +3648,7 @@ func detectHistogramStatsDecoding(expr parser.Expr) {
 				break
 			}
 		}
-		return fmt.Errorf("stop")
+		return errors.New("stop")
 	})
 }

promql/info.go (2 changed lines)

@@ -90,7 +90,7 @@ func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints {
 				nodeTimestamp = n.Timestamp
 			}
 			offset = durationMilliseconds(n.OriginalOffset)
-			return fmt.Errorf("end traversal")
+			return errors.New("end traversal")
 		default:
 			return nil
 		}

promql/promqltest/test.go (2 changed lines)

@@ -672,7 +672,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
 	switch val := result.(type) {
 	case promql.Matrix:
 		if ev.ordered {
-			return fmt.Errorf("expected ordered result, but query returned a matrix")
+			return errors.New("expected ordered result, but query returned a matrix")
 		}
 		if ev.expectScalar {

rules/alerting.go (3 changed lines)

@@ -15,6 +15,7 @@ package rules
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log/slog"
 	"net/url"
@@ -403,7 +404,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t
 		resultFPs[h] = struct{}{}
 		if _, ok := alerts[h]; ok {
-			return nil, fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
+			return nil, errors.New("vector contains metrics with the same labelset after applying alert labels")
 		}
 		alerts[h] = &Alert{

rules/recording.go (3 changed lines)

@@ -15,6 +15,7 @@ package rules
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/url"
 	"time"
@@ -103,7 +104,7 @@ func (rule *RecordingRule) Eval(ctx context.Context, queryOffset time.Duration,
 	// Check that the rule does not produce identical metrics after applying
 	// labels.
 	if vector.ContainsSameLabelset() {
-		return nil, fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
+		return nil, errors.New("vector contains metrics with the same labelset after applying rule labels")
 	}
 	numSeries := len(vector)

scrape/manager_test.go (5 changed lines)

@@ -16,6 +16,7 @@ package scrape
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
@@ -898,7 +899,7 @@ func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender
 		if len(appender.resultFloats) > 0 {
 			return nil
 		}
-		return fmt.Errorf("expected some float samples, got none")
+		return errors.New("expected some float samples, got none")
 	}), "after 1 minute")
 	manager.Stop()
 }
@@ -1061,7 +1062,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
 		if len(app.resultHistograms) > 0 {
 			return nil
 		}
-		return fmt.Errorf("expected some histogram samples, got none")
+		return errors.New("expected some histogram samples, got none")
 	}), "after 1 minute")
 	scrapeManager.Stop()

scrape/scrape_test.go (6 changed lines)

@@ -1010,7 +1010,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	sl := newBasicScrapeLoop(t, ctx, scraper, app, time.Second)
-	forcedErr := fmt.Errorf("forced err")
+	forcedErr := errors.New("forced err")
 	sl.setForcedError(forcedErr)
 	scraper.scrapeFunc = func(context.Context, io.Writer) error {
@@ -1464,7 +1464,7 @@ func TestScrapeLoopCache(t *testing.T) {
 		case 4:
 			cancel()
 		}
-		return fmt.Errorf("scrape failed")
+		return errors.New("scrape failed")
 	}
 	go func() {
@@ -3264,7 +3264,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
 	scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
 		numScrapes++
 		if numScrapes%4 == 0 {
-			return fmt.Errorf("scrape failed")
+			return errors.New("scrape failed")
 		}
 		w.Write([]byte("metric_a 44\nmetric_b 44\nmetric_c 44\nmetric_d 44\n"))
 		return nil

storage/interface.go (10 changed lines)

@@ -41,17 +41,17 @@ var (
 	ErrOutOfOrderExemplar = errors.New("out of order exemplar")
 	ErrDuplicateExemplar = errors.New("duplicate exemplar")
 	ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
-	ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
-	ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled")
-	ErrOOONativeHistogramsDisabled = fmt.Errorf("out-of-order native histogram ingestion is disabled")
+	ErrExemplarsDisabled = errors.New("exemplar storage is disabled or max exemplars is less than or equal to 0")
+	ErrNativeHistogramsDisabled = errors.New("native histograms are disabled")
+	ErrOOONativeHistogramsDisabled = errors.New("out-of-order native histogram ingestion is disabled")
 	// ErrOutOfOrderCT indicates failed append of CT to the storage
 	// due to CT being older the then newer sample.
 	// NOTE(bwplotka): This can be both an instrumentation failure or commonly expected
 	// behaviour, and we currently don't have a way to determine this. As a result
 	// it's recommended to ignore this error for now.
-	ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring")
-	ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring")
+	ErrOutOfOrderCT = errors.New("created timestamp out of order, ignoring")
+	ErrCTNewerThanSample = errors.New("CT is newer or the same as sample's timestamp, ignoring")
 )
 // SeriesRef is a generic series reference. In prometheus it is either a
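
The values converted in this block are package-level sentinels compared by identity, so swapping fmt.Errorf for errors.New is behavior-preserving for callers using errors.Is; only ErrExemplarLabelLength keeps fmt.Errorf, since it interpolates exemplar.ExemplarMaxLabelSetLength. A small sketch, assuming a caller that wraps one of these sentinels (the local variable names are illustrative):

	package main

	import (
		"errors"
		"fmt"
	)

	// Hypothetical sentinel, mirroring those in storage/interface.go.
	var errNativeHistogramsDisabled = errors.New("native histograms are disabled")

	func main() {
		wrapped := fmt.Errorf("append failed: %w", errNativeHistogramsDisabled)
		// Identity comparison via errors.Is behaves exactly as it did
		// when the sentinel was constructed with fmt.Errorf.
		fmt.Println(errors.Is(wrapped, errNativeHistogramsDisabled)) // true
	}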

storage/remote/azuread/azuread.go (27 changed lines)

@@ -16,7 +16,6 @@ package azuread
 import (
 	"context"
 	"errors"
-	"fmt"
 	"net/http"
 	"strings"
 	"sync"
@@ -110,55 +109,55 @@ func (c *AzureADConfig) Validate() error {
 	}
 	if c.Cloud != AzureChina && c.Cloud != AzureGovernment && c.Cloud != AzurePublic {
-		return fmt.Errorf("must provide a cloud in the Azure AD config")
+		return errors.New("must provide a cloud in the Azure AD config")
 	}
 	if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil {
-		return fmt.Errorf("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
+		return errors.New("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
 	}
 	if c.ManagedIdentity != nil && c.OAuth != nil {
-		return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
+		return errors.New("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
 	}
 	if c.ManagedIdentity != nil && c.SDK != nil {
-		return fmt.Errorf("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
+		return errors.New("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
 	}
 	if c.OAuth != nil && c.SDK != nil {
-		return fmt.Errorf("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
+		return errors.New("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
 	}
 	if c.ManagedIdentity != nil {
 		if c.ManagedIdentity.ClientID == "" {
-			return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
+			return errors.New("must provide an Azure Managed Identity client_id in the Azure AD config")
 		}
 		_, err := uuid.Parse(c.ManagedIdentity.ClientID)
 		if err != nil {
-			return fmt.Errorf("the provided Azure Managed Identity client_id is invalid")
+			return errors.New("the provided Azure Managed Identity client_id is invalid")
 		}
 	}
 	if c.OAuth != nil {
 		if c.OAuth.ClientID == "" {
-			return fmt.Errorf("must provide an Azure OAuth client_id in the Azure AD config")
+			return errors.New("must provide an Azure OAuth client_id in the Azure AD config")
 		}
 		if c.OAuth.ClientSecret == "" {
-			return fmt.Errorf("must provide an Azure OAuth client_secret in the Azure AD config")
+			return errors.New("must provide an Azure OAuth client_secret in the Azure AD config")
 		}
 		if c.OAuth.TenantID == "" {
-			return fmt.Errorf("must provide an Azure OAuth tenant_id in the Azure AD config")
+			return errors.New("must provide an Azure OAuth tenant_id in the Azure AD config")
 		}
 		var err error
 		_, err = uuid.Parse(c.OAuth.ClientID)
 		if err != nil {
-			return fmt.Errorf("the provided Azure OAuth client_id is invalid")
+			return errors.New("the provided Azure OAuth client_id is invalid")
 		}
 		_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.OAuth.TenantID)
 		if err != nil {
-			return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
+			return errors.New("the provided Azure OAuth tenant_id is invalid")
 		}
 	}
@@ -168,7 +167,7 @@ func (c *AzureADConfig) Validate() error {
 		if c.SDK.TenantID != "" {
 			_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID)
 			if err != nil {
-				return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
+				return errors.New("the provided Azure OAuth tenant_id is invalid")
 			}
 		}
 	}

storage/remote/queue_manager_test.go (2 changed lines)

@@ -763,7 +763,7 @@ func TestDisableReshardOnRetry(t *testing.T) {
 			onStoreCalled()
 			return WriteResponseStats{}, RecoverableError{
-				error:      fmt.Errorf("fake error"),
+				error:      errors.New("fake error"),
 				retryAfter: model.Duration(retryAfter),
 			}
 		},

storage/remote/write_handler_test.go (4 changed lines)

@@ -672,7 +672,7 @@ func TestCommitErr_V1Message(t *testing.T) {
 	req, err := http.NewRequest("", "", bytes.NewReader(payload))
 	require.NoError(t, err)
-	appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+	appendable := &mockAppendable{commitErr: errors.New("commit error")}
 	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
 	recorder := httptest.NewRecorder()
@@ -696,7 +696,7 @@ func TestCommitErr_V2Message(t *testing.T) {
 	req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
 	req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
-	appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+	appendable := &mockAppendable{commitErr: errors.New("commit error")}
 	handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
 	recorder := httptest.NewRecorder()

tsdb/chunkenc/chunk_test.go (2 changed lines)

@@ -132,7 +132,7 @@ func TestPool(t *testing.T) {
 		{
 			name:     "invalid encoding",
 			encoding: EncNone,
-			expErr:   fmt.Errorf(`invalid chunk encoding "none"`),
+			expErr:   errors.New(`invalid chunk encoding "none"`),
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {

7
tsdb/chunkenc/float_histogram.go

@@ -15,6 +15,7 @@ package chunkenc
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"math" "math"
@@ -761,9 +762,9 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
if !okToAppend || counterReset { if !okToAppend || counterReset {
if appendOnly { if appendOnly {
if counterReset { if counterReset {
return nil, false, a, fmt.Errorf("float histogram counter reset") return nil, false, a, errors.New("float histogram counter reset")
} }
return nil, false, a, fmt.Errorf("float histogram schema change") return nil, false, a, errors.New("float histogram schema change")
} }
newChunk := NewFloatHistogramChunk() newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender() app, err := newChunk.Appender()
@@ -812,7 +813,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h) pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend { if !okToAppend {
if appendOnly { if appendOnly {
return nil, false, a, fmt.Errorf("float gauge histogram schema change") return nil, false, a, errors.New("float gauge histogram schema change")
} }
newChunk := NewFloatHistogramChunk() newChunk := NewFloatHistogramChunk()
app, err := newChunk.Appender() app, err := newChunk.Appender()

7
tsdb/chunkenc/histogram.go

@@ -15,6 +15,7 @@ package chunkenc
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"math" "math"
@@ -795,9 +796,9 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
if !okToAppend || counterReset { if !okToAppend || counterReset {
if appendOnly { if appendOnly {
if counterReset { if counterReset {
return nil, false, a, fmt.Errorf("histogram counter reset") return nil, false, a, errors.New("histogram counter reset")
} }
return nil, false, a, fmt.Errorf("histogram schema change") return nil, false, a, errors.New("histogram schema change")
} }
newChunk := NewHistogramChunk() newChunk := NewHistogramChunk()
app, err := newChunk.Appender() app, err := newChunk.Appender()
@@ -846,7 +847,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h) pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, pMergedSpans, nMergedSpans, okToAppend := a.appendableGauge(h)
if !okToAppend { if !okToAppend {
if appendOnly { if appendOnly {
return nil, false, a, fmt.Errorf("gauge histogram schema change") return nil, false, a, errors.New("gauge histogram schema change")
} }
newChunk := NewHistogramChunk() newChunk := NewHistogramChunk()
app, err := newChunk.Appender() app, err := newChunk.Appender()
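The motivation for the rule is performance: fmt.Errorf walks its format string even when there is nothing to substitute, while errors.New only allocates the message. A hedged micro-benchmark sketch (save as a _test.go file and run with go test -bench=. -benchmem; exact numbers will vary):

```go
// Sketch only: compares the two ways of building a constant error message.
package perf

import (
	"errors"
	"fmt"
	"testing"
)

var sink error // prevents the compiler from optimizing the calls away

func BenchmarkFmtErrorf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = fmt.Errorf("histogram schema change")
	}
}

func BenchmarkErrorsNew(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = errors.New("histogram schema change")
	}
}
```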

7
tsdb/chunks/chunks.go

@@ -16,6 +16,7 @@ package chunks
import ( import (
"bufio" "bufio"
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"hash" "hash"
"hash/crc32" "hash/crc32"
@@ -172,7 +173,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, err return emptyChunk, err
} }
if newChunk != nil { if newChunk != nil {
return emptyChunk, fmt.Errorf("did not expect to start a second chunk") return emptyChunk, errors.New("did not expect to start a second chunk")
} }
case chunkenc.ValFloatHistogram: case chunkenc.ValFloatHistogram:
newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false) newChunk, _, ca, err = ca.AppendFloatHistogram(nil, s.Get(i).T(), s.Get(i).FH(), false)
@@ -180,7 +181,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
return emptyChunk, err return emptyChunk, err
} }
if newChunk != nil { if newChunk != nil {
return emptyChunk, fmt.Errorf("did not expect to start a second chunk") return emptyChunk, errors.New("did not expect to start a second chunk")
} }
default: default:
panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) panic(fmt.Sprintf("unknown sample type %s", sampleType.String()))
@@ -250,7 +251,7 @@ func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool {
return cm.MinTime <= maxt && mint <= cm.MaxTime return cm.MinTime <= maxt && mint <= cm.MaxTime
} }
var errInvalidSize = fmt.Errorf("invalid size") var errInvalidSize = errors.New("invalid size")
var castagnoliTable *crc32.Table var castagnoliTable *crc32.Table
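Note that the fmt.Sprintf inside the panic above is untouched: it interpolates a value, so the rule does not apply. The errInvalidSize sentinel, by contrast, benefits twice: errors.New is cheaper, and a fixed identity lets callers match it with errors.Is even through %w wrapping. A minimal sketch of that pattern (readSegment is a hypothetical caller):

```go
// Sketch only: a package-level sentinel matched through a wrapped error.
package main

import (
	"errors"
	"fmt"
)

var errInvalidSize = errors.New("invalid size")

func readSegment(size int) error {
	if size < 0 {
		// %w preserves the sentinel's identity for errors.Is.
		return fmt.Errorf("reading segment of size %d: %w", size, errInvalidSize)
	}
	return nil
}

func main() {
	err := readSegment(-8)
	fmt.Println(errors.Is(err, errInvalidSize)) // true
}
```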

2
tsdb/compact.go

@@ -184,7 +184,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.L
func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) {
if len(ranges) == 0 { if len(ranges) == 0 {
return nil, fmt.Errorf("at least one range must be provided") return nil, errors.New("at least one range must be provided")
} }
if pool == nil { if pool == nil {
pool = chunkenc.NewPool() pool = chunkenc.NewPool()

4
tsdb/db.go

@@ -2004,10 +2004,10 @@ func (db *DB) ForceHeadMMap() {
// will create a new block containing all data that's currently in the memory buffer/WAL. // will create a new block containing all data that's currently in the memory buffer/WAL.
func (db *DB) Snapshot(dir string, withHead bool) error { func (db *DB) Snapshot(dir string, withHead bool) error {
if dir == db.dir { if dir == db.dir {
return fmt.Errorf("cannot snapshot into base directory") return errors.New("cannot snapshot into base directory")
} }
if _, err := ulid.ParseStrict(dir); err == nil { if _, err := ulid.ParseStrict(dir); err == nil {
return fmt.Errorf("dir must not be a valid ULID") return errors.New("dir must not be a valid ULID")
} }
db.cmtx.Lock() db.cmtx.Lock()
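Both Snapshot guards return fixed messages, so both convert. A trimmed sketch of the same guard-clause shape (the surrounding DB type is omitted, and the ulid import path may differ from the version Prometheus pins):

```go
// Sketch only: guard clauses with constant error messages.
package main

import (
	"errors"
	"fmt"

	"github.com/oklog/ulid/v2"
)

func validateSnapshotDir(dir, baseDir string) error {
	if dir == baseDir {
		return errors.New("cannot snapshot into base directory")
	}
	if _, err := ulid.ParseStrict(dir); err == nil {
		// A parseable ULID would collide with block directory names.
		return errors.New("dir must not be a valid ULID")
	}
	return nil
}

func main() {
	fmt.Println(validateSnapshotDir("data", "data"))
}
```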

5
tsdb/db_test.go

@@ -18,6 +18,7 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/binary" "encoding/binary"
"errors"
"flag" "flag"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
@@ -1432,7 +1433,7 @@ func (*mockCompactorFailing) Plan(string) ([]string, error) {
func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) { func (c *mockCompactorFailing) Write(dest string, _ BlockReader, _, _ int64, _ *BlockMeta) ([]ulid.ULID, error) {
if len(c.blocks) >= c.max { if len(c.blocks) >= c.max {
return []ulid.ULID{}, fmt.Errorf("the compactor already did the maximum allowed blocks so it is time to fail") return []ulid.ULID{}, errors.New("the compactor already did the maximum allowed blocks so it is time to fail")
} }
block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil) block, err := OpenBlock(nil, createBlock(c.t, dest, genSeries(1, 1, 0, 1)), nil)
@@ -1459,7 +1460,7 @@ func (*mockCompactorFailing) Compact(string, []string, []*Block) ([]ulid.ULID, e
} }
func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) { func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ulid.ULID, err error) {
return nil, fmt.Errorf("mock compaction failing CompactOOO") return nil, errors.New("mock compaction failing CompactOOO")
} }
func TestTimeRetention(t *testing.T) { func TestTimeRetention(t *testing.T) {

2
tsdb/ooo_head_read_test.go

@@ -509,7 +509,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300,
}) })
require.Nil(t, iterable) require.Nil(t, iterable)
require.Equal(t, err, fmt.Errorf("not found")) require.EqualError(t, err, "not found")
require.Nil(t, c) require.Nil(t, c)
}) })
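This assertion changed shape, not just constructor: require.EqualError compares err.Error() against a string, which states the intent directly rather than deep-comparing a freshly constructed error value. A hypothetical test showing both the identity pitfall and the message comparison:

```go
// Sketch only: why message comparison beats comparing fresh error values.
package demo

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorAssertions(t *testing.T) {
	err := errors.New("not found")

	// Each errors.New call returns a distinct value; identity never matches.
	require.False(t, err == errors.New("not found"))

	// Comparing the message is the robust assertion.
	require.EqualError(t, err, "not found")
}
```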

2
tsdb/querier_test.go

@@ -3324,7 +3324,7 @@ func (m mockMatcherIndex) LabelNames(context.Context, ...*labels.Matcher) ([]str
} }
func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings { func (m mockMatcherIndex) PostingsForLabelMatching(context.Context, string, func(string) bool) index.Postings {
return index.ErrPostings(fmt.Errorf("PostingsForLabelMatching called")) return index.ErrPostings(errors.New("PostingsForLabelMatching called"))
} }
func TestPostingsForMatcher(t *testing.T) { func TestPostingsForMatcher(t *testing.T) {

3
tsdb/tsdbblockutil.go

@@ -15,6 +15,7 @@ package tsdb
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log/slog" "log/slog"
"path/filepath" "path/filepath"
@@ -23,7 +24,7 @@ import (
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
) )
var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") var ErrInvalidTimes = errors.New("max time is lesser than min time")
// CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk. // CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk.
func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) { func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) {

4
web/api/v1/api.go

@@ -1606,7 +1606,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe
nextToken := r.URL.Query().Get("group_next_token") nextToken := r.URL.Query().Get("group_next_token")
if nextToken != "" && maxGroups == "" { if nextToken != "" && maxGroups == "" {
errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token") errResult := invalidParamError(errors.New("group_limit needs to be present in order to paginate over the groups"), "group_next_token")
return -1, "", &errResult return -1, "", &errResult
} }
@@ -1617,7 +1617,7 @@ func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncRe
return -1, "", &errResult return -1, "", &errResult
} }
if parsedMaxGroups <= 0 { if parsedMaxGroups <= 0 {
errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit") errResult := invalidParamError(errors.New("group_limit needs to be greater than 0"), "group_limit")
return -1, "", &errResult return -1, "", &errResult
} }
} }
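A hedged sketch of the pagination validation above, using a hypothetical parseGroupLimit helper (invalidParamError and apiFuncResult are Prometheus internals and are not reproduced here): the static checks use errors.New, while the parse failure keeps fmt.Errorf to wrap the cause.

```go
// Sketch only: static validation errors next to one wrapped dynamic error.
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseGroupLimit is a hypothetical helper, not the Prometheus API surface.
func parseGroupLimit(maxGroups, nextToken string) (int64, error) {
	if nextToken != "" && maxGroups == "" {
		return -1, errors.New("group_limit needs to be present in order to paginate over the groups")
	}
	if maxGroups == "" {
		return -1, nil // pagination not requested
	}
	n, err := strconv.ParseInt(maxGroups, 10, 64)
	if err != nil {
		return -1, fmt.Errorf("group_limit must be a number: %w", err)
	}
	if n <= 0 {
		return -1, errors.New("group_limit needs to be greater than 0")
	}
	return n, nil
}

func main() {
	_, err := parseGroupLimit("0", "")
	fmt.Println(err) // group_limit needs to be greater than 0
}
```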

12
web/api/v1/api_test.go

@@ -615,7 +615,7 @@ func TestGetSeries(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorExec, expectedErrorType: errorExec,
api: &API{ api: &API{
Queryable: errorTestQueryable{err: fmt.Errorf("generic")}, Queryable: errorTestQueryable{err: errors.New("generic")},
}, },
}, },
{ {
@@ -623,7 +623,7 @@ func TestGetSeries(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorInternal, expectedErrorType: errorInternal,
api: &API{ api: &API{
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}}, Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
}, },
}, },
} { } {
@@ -717,7 +717,7 @@ func TestQueryExemplars(t *testing.T) {
name: "should return errorExec upon genetic error", name: "should return errorExec upon genetic error",
expectedErrorType: errorExec, expectedErrorType: errorExec,
api: &API{ api: &API{
ExemplarQueryable: errorTestQueryable{err: fmt.Errorf("generic")}, ExemplarQueryable: errorTestQueryable{err: errors.New("generic")},
}, },
query: url.Values{ query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`}, "query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@@ -729,7 +729,7 @@ func TestQueryExemplars(t *testing.T) {
name: "should return errorInternal err type is ErrStorage", name: "should return errorInternal err type is ErrStorage",
expectedErrorType: errorInternal, expectedErrorType: errorInternal,
api: &API{ api: &API{
ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}}, ExemplarQueryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
}, },
query: url.Values{ query: url.Values{
"query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`}, "query": []string{`test_metric3{foo="boo"} - test_metric4{foo="bar"}`},
@@ -838,7 +838,7 @@ func TestLabelNames(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorExec, expectedErrorType: errorExec,
api: &API{ api: &API{
Queryable: errorTestQueryable{err: fmt.Errorf("generic")}, Queryable: errorTestQueryable{err: errors.New("generic")},
}, },
}, },
{ {
@@ -846,7 +846,7 @@ func TestLabelNames(t *testing.T) {
matchers: []string{`{foo="boo"}`, `{foo="baz"}`}, matchers: []string{`{foo="boo"}`, `{foo="baz"}`},
expectedErrorType: errorInternal, expectedErrorType: errorInternal,
api: &API{ api: &API{
Queryable: errorTestQueryable{err: promql.ErrStorage{Err: fmt.Errorf("generic")}}, Queryable: errorTestQueryable{err: promql.ErrStorage{Err: errors.New("generic")}},
}, },
}, },
} { } {
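These tests distinguish a bare errors.New (classified errorExec) from one wrapped in promql.ErrStorage (classified errorInternal). A simplified stand-in for that classification, where errStorage is illustrative rather than the promql type:

```go
// Sketch only: classifying errors by wrapper type with errors.As.
package main

import (
	"errors"
	"fmt"
)

type errStorage struct{ Err error }

func (e errStorage) Error() string { return e.Err.Error() }

func classify(err error) string {
	var se errStorage
	if errors.As(err, &se) {
		return "errorInternal"
	}
	return "errorExec"
}

func main() {
	fmt.Println(classify(errors.New("generic")))                  // errorExec
	fmt.Println(classify(errStorage{Err: errors.New("generic")})) // errorInternal
}
```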
