Merge branch 'main' into label-replace-docs

pull/8765/head
Matthew Smedberg 2021-05-24 08:37:24 -06:00
commit 3ea5e994c2
64 changed files with 3799 additions and 1607 deletions


@@ -41,7 +41,7 @@ jobs:
       GOMAXPROCS: "2"
       GO111MODULE: "on"
       - prometheus/check_proto:
-          version: "3.12.3"
+          version: "3.15.8"
       - prometheus/store_artifact:
           file: prometheus
       - prometheus/store_artifact:

[File diff suppressed because it is too large.]


@@ -32,8 +32,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.25 | 2021-02-10 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.26 | 2021-03-24 | Bartek Plotka (GitHub: @bwplotka) |
 | v2.27 | 2021-05-05 | Chris Marchbanks (GitHub: @csmarchbanks) |
-| v2.28 | 2021-06-16 | **searching for volunteer** |
-| v2.29 | 2021-07-28 | **searching for volunteer** |
+| v2.28 | 2021-06-16 | Julius Volz (GitHub: @juliusv) |
+| v2.29 | 2021-07-28 | Frederic Branczyk (GitHub: @brancz) |
+| v2.30 | 2021-09-08 | **searching for volunteer** |
+| v2.31 | 2021-10-20 | **searching for volunteer** |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@@ -1 +1 @@
-2.26.0
+2.27.1


@@ -382,11 +382,21 @@ type ScrapeConfig struct {
     MetricsPath string `yaml:"metrics_path,omitempty"`
     // The URL scheme with which to fetch metrics from targets.
     Scheme string `yaml:"scheme,omitempty"`
-    // More than this many samples post metric-relabeling will cause the scrape to fail.
+    // More than this many samples post metric-relabeling will cause the scrape to
+    // fail.
     SampleLimit uint `yaml:"sample_limit,omitempty"`
     // More than this many targets after the target relabeling will cause the
     // scrapes to fail.
     TargetLimit uint `yaml:"target_limit,omitempty"`
+    // More than this many labels post metric-relabeling will cause the scrape to
+    // fail.
+    LabelLimit uint `yaml:"label_limit,omitempty"`
+    // More than this label name length post metric-relabeling will cause the
+    // scrape to fail.
+    LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
+    // More than this label value length post metric-relabeling will cause the
+    // scrape to fail.
+    LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
 
     // We cannot do proper Go type embedding below as the parser will then parse
     // values arbitrarily into the overflow maps of further-down types.
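The three new limits follow the existing sample_limit/target_limit pattern: zero means unlimited, and the YAML keys come straight from the struct tags. A minimal, self-contained sketch of the key-to-field mapping (the struct below is a trimmed stand-in for ScrapeConfig, not the real type):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// limits is a trimmed stand-in for ScrapeConfig, showing only the
// label-limit fields added above and their YAML keys.
type limits struct {
	LabelLimit            uint `yaml:"label_limit,omitempty"`
	LabelNameLengthLimit  uint `yaml:"label_name_length_limit,omitempty"`
	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
}

func main() {
	doc := []byte("label_limit: 30\nlabel_name_length_limit: 200\nlabel_value_length_limit: 2000\n")
	var l limits
	if err := yaml.Unmarshal(doc, &l); err != nil {
		panic(err)
	}
	// Unset keys stay 0, which the scrape code treats as "no limit".
	fmt.Printf("%+v\n", l)
}
```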
@@ -622,6 +632,7 @@ type RemoteWriteConfig struct {
     Headers             map[string]string `yaml:"headers,omitempty"`
     WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"`
     Name                string            `yaml:"name,omitempty"`
+    SendExemplars       bool              `yaml:"send_exemplars,omitempty"`
 
     // We cannot do proper Go type embedding below as the parser will then parse
     // values arbitrarily into the overflow maps of further-down types.

@@ -663,10 +674,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
     }
     httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
-        c.HTTPClientConfig.Authorization != nil
+        c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
     if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-        return fmt.Errorf("at most one of basic_auth, authorization, & sigv4 must be configured")
+        return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
     }
     return nil

@@ -675,7 +686,7 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 func validateHeaders(headers map[string]string) error {
     for header := range headers {
         if strings.ToLower(header) == "authorization" {
-            return errors.New("authorization header must be changed via the basic_auth or authorization parameter")
+            return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter")
         }
         if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
             return errors.Errorf("%s is a reserved header. It must not be changed", header)


@@ -30,11 +30,11 @@ import (
     "gopkg.in/yaml.v2"
 
     "github.com/prometheus/prometheus/discovery"
+    "github.com/prometheus/prometheus/discovery/aws"
     "github.com/prometheus/prometheus/discovery/azure"
     "github.com/prometheus/prometheus/discovery/consul"
     "github.com/prometheus/prometheus/discovery/digitalocean"
     "github.com/prometheus/prometheus/discovery/dns"
-    "github.com/prometheus/prometheus/discovery/ec2"
     "github.com/prometheus/prometheus/discovery/eureka"
     "github.com/prometheus/prometheus/discovery/file"
     "github.com/prometheus/prometheus/discovery/hetzner"

@@ -92,7 +92,14 @@ var expectedConf = &Config{
             },
             QueueConfig:    DefaultQueueConfig,
             MetadataConfig: DefaultMetadataConfig,
-            HTTPClientConfig: config.DefaultHTTPClientConfig,
+            HTTPClientConfig: config.HTTPClientConfig{
+                OAuth2: &config.OAuth2{
+                    ClientID:     "123",
+                    ClientSecret: "456",
+                    TokenURL:     "http://remote1/auth",
+                },
+                FollowRedirects: true,
+            },
         },
         {
             URL: mustParseURL("http://remote2/push"),

@@ -465,14 +472,14 @@ var expectedConf = &Config{
             HTTPClientConfig: config.DefaultHTTPClientConfig,
             ServiceDiscoveryConfigs: discovery.Configs{
-                &ec2.SDConfig{
+                &aws.EC2SDConfig{
                     Region:          "us-east-1",
                     AccessKey:       "access",
                     SecretKey:       "mysecret",
                     Profile:         "profile",
                     RefreshInterval: model.Duration(60 * time.Second),
                     Port:            80,
-                    Filters: []*ec2.Filter{
+                    Filters: []*aws.EC2Filter{
                         {
                             Name:   "tag:environment",
                             Values: []string{"prod"},

@@ -485,6 +492,28 @@ var expectedConf = &Config{
                     },
                 },
             },
+        },
+        {
+            JobName: "service-lightsail",
+
+            HonorTimestamps: true,
+            ScrapeInterval:  model.Duration(15 * time.Second),
+            ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+
+            MetricsPath: DefaultScrapeConfig.MetricsPath,
+            Scheme:      DefaultScrapeConfig.Scheme,
+
+            HTTPClientConfig: config.DefaultHTTPClientConfig,
+            ServiceDiscoveryConfigs: discovery.Configs{
+                &aws.LightsailSDConfig{
+                    Region:          "us-east-1",
+                    AccessKey:       "access",
+                    SecretKey:       "mysecret",
+                    Profile:         "profile",
+                    RefreshInterval: model.Duration(60 * time.Second),
+                    Port:            80,
+                },
+            },
         },
         {
             JobName: "service-azure",

@@ -886,7 +915,7 @@ func TestElideSecrets(t *testing.T) {
     yamlConfig := string(config)
 
     matches := secretRe.FindAllStringIndex(yamlConfig, -1)
-    require.Equal(t, 12, len(matches), "wrong number of secret matches found")
+    require.Equal(t, 14, len(matches), "wrong number of secret matches found")
     require.NotContains(t, yamlConfig, "mysecret",
         "yaml marshal reveals authentication credentials.")
 }

@@ -988,7 +1017,7 @@ var expectedErrors = []struct {
     errMsg: "at most one of bearer_token & bearer_token_file must be configured",
 }, {
     filename: "bearertoken_basicauth.bad.yml",
-    errMsg:   "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
+    errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
 }, {
     filename: "kubernetes_http_config_without_api_server.bad.yml",
     errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",

@@ -1024,10 +1053,10 @@ var expectedErrors = []struct {
     errMsg: "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
 }, {
     filename: "kubernetes_bearertoken_basicauth.bad.yml",
-    errMsg:   "at most one of basic_auth, bearer_token & bearer_token_file must be configured",
+    errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
 }, {
     filename: "kubernetes_authorization_basicauth.bad.yml",
-    errMsg:   "at most one of basic_auth & authorization must be configured",
+    errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
 }, {
     filename: "marathon_no_servers.bad.yml",
     errMsg:   "marathon_sd: must contain at least one Marathon server",

@@ -1072,7 +1101,7 @@ var expectedErrors = []struct {
     errMsg: `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
 }, {
     filename: "remote_write_authorization_header.bad.yml",
-    errMsg:   `authorization header must be changed via the basic_auth or authorization parameter`,
+    errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
 }, {
     filename: "remote_write_url_missing.bad.yml",
     errMsg:   `url for remote_write is empty`,


@@ -19,6 +19,11 @@ remote_write:
       - source_labels: [__name__]
         regex: expensive.*
         action: drop
+    oauth2:
+      client_id: "123"
+      client_secret: "456"
+      token_url: "http://remote1/auth"
+
   - url: http://remote2/push
     name: rw_tls
     tls_config:

@@ -215,6 +220,13 @@ scrape_configs:
         - web
         - db
 
+  - job_name: service-lightsail
+    lightsail_sd_configs:
+      - region: us-east-1
+        access_key: access
+        secret_key: mysecret
+        profile: profile
+
   - job_name: service-azure
     azure_sd_configs:
       - environment: AzurePublicCloud


@@ -262,6 +262,6 @@ Here are some non-obvious parts of adding service discoveries that need to be ve
 
 ### Examples of Service Discovery pull requests
 
-The exemples given might become out of date but should give a good impression about the areas touched by a new service discovery.
+The examples given might become out of date but should give a good impression about the areas touched by a new service discovery.
 
 - [Eureka](https://github.com/prometheus/prometheus/pull/3369)


@ -1,4 +1,4 @@
// Copyright 2015 The Prometheus Authors // Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // You may obtain a copy of the License at
@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package ec2 package aws
import ( import (
"context" "context"
@ -61,24 +61,26 @@ const (
ec2LabelSeparator = "," ec2LabelSeparator = ","
) )
// DefaultSDConfig is the default EC2 SD configuration. var (
var DefaultSDConfig = SDConfig{ // DefaultEC2SDConfig is the default EC2 SD configuration.
DefaultEC2SDConfig = EC2SDConfig{
Port: 80, Port: 80,
RefreshInterval: model.Duration(60 * time.Second), RefreshInterval: model.Duration(60 * time.Second),
} }
)
func init() { func init() {
discovery.RegisterConfig(&SDConfig{}) discovery.RegisterConfig(&EC2SDConfig{})
} }
// Filter is the configuration for filtering EC2 instances. // EC2Filter is the configuration for filtering EC2 instances.
type Filter struct { type EC2Filter struct {
Name string `yaml:"name"` Name string `yaml:"name"`
Values []string `yaml:"values"` Values []string `yaml:"values"`
} }
// SDConfig is the configuration for EC2 based service discovery. // EC2SDConfig is the configuration for EC2 based service discovery.
type SDConfig struct { type EC2SDConfig struct {
Endpoint string `yaml:"endpoint"` Endpoint string `yaml:"endpoint"`
Region string `yaml:"region"` Region string `yaml:"region"`
AccessKey string `yaml:"access_key,omitempty"` AccessKey string `yaml:"access_key,omitempty"`
@ -87,21 +89,21 @@ type SDConfig struct {
RoleARN string `yaml:"role_arn,omitempty"` RoleARN string `yaml:"role_arn,omitempty"`
RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
Port int `yaml:"port"` Port int `yaml:"port"`
Filters []*Filter `yaml:"filters"` Filters []*EC2Filter `yaml:"filters"`
} }
// Name returns the name of the Config. // Name returns the name of the EC2 Config.
func (*SDConfig) Name() string { return "ec2" } func (*EC2SDConfig) Name() string { return "ec2" }
// NewDiscoverer returns a Discoverer for the Config. // NewDiscoverer returns a Discoverer for the EC2 Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger), nil return NewEC2Discovery(c, opts.Logger), nil
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface. // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultSDConfig *c = DefaultEC2SDConfig
type plain SDConfig type plain EC2SDConfig
err := unmarshal((*plain)(c)) err := unmarshal((*plain)(c))
if err != nil { if err != nil {
return err return err
@ -126,20 +128,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil return nil
} }
// Discovery periodically performs EC2-SD requests. It implements // EC2Discovery periodically performs EC2-SD requests. It implements
// the Discoverer interface. // the Discoverer interface.
type Discovery struct { type EC2Discovery struct {
*refresh.Discovery *refresh.Discovery
cfg *SDConfig cfg *EC2SDConfig
ec2 *ec2.EC2 ec2 *ec2.EC2
} }
// NewDiscovery returns a new EC2Discovery which periodically refreshes its targets. // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery { func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
d := &Discovery{ d := &EC2Discovery{
cfg: conf, cfg: conf,
} }
d.Discovery = refresh.NewDiscovery( d.Discovery = refresh.NewDiscovery(
@ -151,7 +153,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery {
return d return d
} }
func (d *Discovery) ec2Client() (*ec2.EC2, error) { func (d *EC2Discovery) ec2Client() (*ec2.EC2, error) {
if d.ec2 != nil { if d.ec2 != nil {
return d.ec2, nil return d.ec2, nil
} }
@ -183,7 +185,7 @@ func (d *Discovery) ec2Client() (*ec2.EC2, error) {
return d.ec2, nil return d.ec2, nil
} }
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
ec2Client, err := d.ec2Client() ec2Client, err := d.ec2Client()
if err != nil { if err != nil {
return nil, err return nil, err

discovery/aws/lightsail.go (new file, 234 lines)

@@ -0,0 +1,234 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aws

import (
    "context"
    "fmt"
    "net"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/lightsail"
    "github.com/go-kit/kit/log"
    "github.com/pkg/errors"
    "github.com/prometheus/common/config"
    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/discovery"
    "github.com/prometheus/prometheus/discovery/refresh"
    "github.com/prometheus/prometheus/discovery/targetgroup"
    "github.com/prometheus/prometheus/util/strutil"
)

const (
    lightsailLabel                    = model.MetaLabelPrefix + "lightsail_"
    lightsailLabelAZ                  = lightsailLabel + "availability_zone"
    lightsailLabelBlueprintID         = lightsailLabel + "blueprint_id"
    lightsailLabelBundleID            = lightsailLabel + "bundle_id"
    lightsailLabelInstanceName        = lightsailLabel + "instance_name"
    lightsailLabelInstanceState       = lightsailLabel + "instance_state"
    lightsailLabelInstanceSupportCode = lightsailLabel + "instance_support_code"
    lightsailLabelIPv6Addresses       = lightsailLabel + "ipv6_addresses"
    lightsailLabelPrivateIP           = lightsailLabel + "private_ip"
    lightsailLabelPublicIP            = lightsailLabel + "public_ip"
    lightsailLabelTag                 = lightsailLabel + "tag_"
    lightsailLabelSeparator           = ","
)

var (
    // DefaultLightsailSDConfig is the default Lightsail SD configuration.
    DefaultLightsailSDConfig = LightsailSDConfig{
        Port:            80,
        RefreshInterval: model.Duration(60 * time.Second),
    }
)

func init() {
    discovery.RegisterConfig(&LightsailSDConfig{})
}

// LightsailSDConfig is the configuration for Lightsail based service discovery.
type LightsailSDConfig struct {
    Endpoint        string         `yaml:"endpoint"`
    Region          string         `yaml:"region"`
    AccessKey       string         `yaml:"access_key,omitempty"`
    SecretKey       config.Secret  `yaml:"secret_key,omitempty"`
    Profile         string         `yaml:"profile,omitempty"`
    RoleARN         string         `yaml:"role_arn,omitempty"`
    RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
    Port            int            `yaml:"port"`
}

// Name returns the name of the Lightsail Config.
func (*LightsailSDConfig) Name() string { return "lightsail" }

// NewDiscoverer returns a Discoverer for the Lightsail Config.
func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
    return NewLightsailDiscovery(c, opts.Logger), nil
}

// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
    *c = DefaultLightsailSDConfig
    type plain LightsailSDConfig
    err := unmarshal((*plain)(c))
    if err != nil {
        return err
    }
    if c.Region == "" {
        sess, err := session.NewSession()
        if err != nil {
            return err
        }
        metadata := ec2metadata.New(sess)
        region, err := metadata.Region()
        if err != nil {
            return errors.New("Lightsail SD configuration requires a region")
        }
        c.Region = region
    }
    return nil
}

// LightsailDiscovery periodically performs Lightsail-SD requests. It implements
// the Discoverer interface.
type LightsailDiscovery struct {
    *refresh.Discovery
    cfg       *LightsailSDConfig
    lightsail *lightsail.Lightsail
}

// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger) *LightsailDiscovery {
    if logger == nil {
        logger = log.NewNopLogger()
    }
    d := &LightsailDiscovery{
        cfg: conf,
    }
    d.Discovery = refresh.NewDiscovery(
        logger,
        "lightsail",
        time.Duration(d.cfg.RefreshInterval),
        d.refresh,
    )
    return d
}

func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) {
    if d.lightsail != nil {
        return d.lightsail, nil
    }

    creds := credentials.NewStaticCredentials(d.cfg.AccessKey, string(d.cfg.SecretKey), "")
    if d.cfg.AccessKey == "" && d.cfg.SecretKey == "" {
        creds = nil
    }

    sess, err := session.NewSessionWithOptions(session.Options{
        Config: aws.Config{
            Endpoint:    &d.cfg.Endpoint,
            Region:      &d.cfg.Region,
            Credentials: creds,
        },
        Profile: d.cfg.Profile,
    })
    if err != nil {
        return nil, errors.Wrap(err, "could not create aws session")
    }

    if d.cfg.RoleARN != "" {
        creds := stscreds.NewCredentials(sess, d.cfg.RoleARN)
        d.lightsail = lightsail.New(sess, &aws.Config{Credentials: creds})
    } else {
        d.lightsail = lightsail.New(sess)
    }

    return d.lightsail, nil
}

func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
    lightsailClient, err := d.lightsailClient()
    if err != nil {
        return nil, err
    }

    tg := &targetgroup.Group{
        Source: d.cfg.Region,
    }

    input := &lightsail.GetInstancesInput{}

    output, err := lightsailClient.GetInstancesWithContext(ctx, input)
    if err != nil {
        if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "AuthFailure" || awsErr.Code() == "UnauthorizedOperation") {
            d.lightsail = nil
        }
        return nil, errors.Wrap(err, "could not get instances")
    }

    for _, inst := range output.Instances {
        if inst.PrivateIpAddress == nil {
            continue
        }

        labels := model.LabelSet{
            lightsailLabelAZ:                  model.LabelValue(*inst.Location.AvailabilityZone),
            lightsailLabelBlueprintID:         model.LabelValue(*inst.BlueprintId),
            lightsailLabelBundleID:            model.LabelValue(*inst.BundleId),
            lightsailLabelInstanceName:        model.LabelValue(*inst.Name),
            lightsailLabelInstanceState:       model.LabelValue(*inst.State.Name),
            lightsailLabelInstanceSupportCode: model.LabelValue(*inst.SupportCode),
            lightsailLabelPrivateIP:           model.LabelValue(*inst.PrivateIpAddress),
        }

        addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
        labels[model.AddressLabel] = model.LabelValue(addr)

        if inst.PublicIpAddress != nil {
            labels[lightsailLabelPublicIP] = model.LabelValue(*inst.PublicIpAddress)
        }

        if len(inst.Ipv6Addresses) > 0 {
            var ipv6addrs []string
            for _, ipv6addr := range inst.Ipv6Addresses {
                ipv6addrs = append(ipv6addrs, *ipv6addr)
            }
            labels[lightsailLabelIPv6Addresses] = model.LabelValue(
                lightsailLabelSeparator +
                    strings.Join(ipv6addrs, lightsailLabelSeparator) +
                    lightsailLabelSeparator)
        }

        for _, t := range inst.Tags {
            if t == nil || t.Key == nil || t.Value == nil {
                continue
            }
            name := strutil.SanitizeLabelName(*t.Key)
            labels[lightsailLabelTag+model.LabelName(name)] = model.LabelValue(*t.Value)
        }

        tg.Targets = append(tg.Targets, labels)
    }
    return []*targetgroup.Group{tg}, nil
}


@@ -426,6 +426,15 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
             }
         }
     }
+
+    // Send targetgroup with no targets if nothing was discovered.
+    if len(services) == 0 {
+        select {
+        case <-ctx.Done():
+            return
+        case ch <- []*targetgroup.Group{{}}:
+        }
+    }
 }
 
 // consulService contains data belonging to the same service.


@@ -298,6 +298,23 @@ func TestAllServices(t *testing.T) {
     <-ch
 }
 
+// targetgroup with no targets is emitted if no services were discovered.
+func TestNoTargets(t *testing.T) {
+    stub, config := newServer(t)
+    defer stub.Close()
+    config.ServiceTags = []string{"missing"}
+
+    d := newDiscovery(t, config)
+    ctx, cancel := context.WithCancel(context.Background())
+    ch := make(chan []*targetgroup.Group)
+    go d.Run(ctx, ch)
+
+    targets := (<-ch)[0].Targets
+    require.Equal(t, 0, len(targets))
+    cancel()
+}
+
 // Watch only the test service.
 func TestOneService(t *testing.T) {
     stub, config := newServer(t)


@@ -116,7 +116,7 @@ type Discovery struct {
     server string
 }
 
-// New creates a new Eureka discovery for the given role.
+// NewDiscovery creates a new Eureka discovery for the given role.
 func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
     rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd", config.WithHTTP2Disabled())
     if err != nil {


@@ -16,11 +16,11 @@
 package install
 
 import (
+    _ "github.com/prometheus/prometheus/discovery/aws"          // register aws
     _ "github.com/prometheus/prometheus/discovery/azure"        // register azure
     _ "github.com/prometheus/prometheus/discovery/consul"       // register consul
     _ "github.com/prometheus/prometheus/discovery/digitalocean" // register digitalocean
     _ "github.com/prometheus/prometheus/discovery/dns"          // register dns
-    _ "github.com/prometheus/prometheus/discovery/ec2"          // register ec2
     _ "github.com/prometheus/prometheus/discovery/eureka"       // register eureka
     _ "github.com/prometheus/prometheus/discovery/file"         // register file
     _ "github.com/prometheus/prometheus/discovery/gce"          // register gce


@@ -279,15 +279,17 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group {
     defer m.mtx.RUnlock()
 
     tSets := map[string][]*targetgroup.Group{}
+    n := map[string]int{}
     for pkey, tsets := range m.targets {
-        var n int
         for _, tg := range tsets {
             // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
             // to signal that it needs to stop all scrape loops for this target set.
             tSets[pkey.setName] = append(tSets[pkey.setName], tg)
-            n += len(tg.Targets)
+            n[pkey.setName] += len(tg.Targets)
         }
-        discoveredTargets.WithLabelValues(m.name, pkey.setName).Set(float64(n))
+    }
+    for setName, v := range n {
+        discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
     }
     return tSets
 }
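The hunk above fixes how the discoveredTargets gauge is computed: the count was previously reset per provider key inside the loop, so when more than one provider feeds the same target set, the last provider visited overwrote the totals of the others. A self-contained sketch of the corrected two-pass aggregation (types and numbers invented for illustration):

```go
package main

import "fmt"

// poolKey mimics the manager's per-provider map key: one scrape set
// ("node") may be served by several discovery providers.
type poolKey struct{ setName, provider string }

func main() {
	targetCounts := map[poolKey]int{
		{setName: "node", provider: "consul/0"}: 3,
		{setName: "node", provider: "file/1"}:   2,
	}

	// First pass: sum per set name across every provider key.
	n := map[string]int{}
	for pkey, c := range targetCounts {
		n[pkey.setName] += c
	}
	// Second pass: export once per set name; this stands in for
	// discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v)).
	for setName, v := range n {
		fmt.Println(setName, v) // node 5
	}
}
```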


@@ -180,6 +180,11 @@ authorization:
   # configured file. It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configure whether scrape requests follow HTTP 3xx redirects.
 [ follow_redirects: <bool> | default = true ]

@@ -238,6 +243,10 @@ hetzner_sd_configs:
 kubernetes_sd_configs:
   [ - <kubernetes_sd_config> ... ]
 
+# List of Lightsail service discovery configurations.
+lightsail_sd_configs:
+  [ - <lightsail_sd_config> ... ]
+
 # List of Marathon service discovery configurations.
 marathon_sd_configs:
   [ - <marathon_sd_config> ... ]

@@ -279,6 +288,21 @@ metric_relabel_configs:
 # the entire scrape will be treated as failed. 0 means no limit.
 [ sample_limit: <int> | default = 0 ]
 
+# Per-scrape limit on number of labels that will be accepted for a sample. If
+# more than this number of labels are present post metric-relabeling, the
+# entire scrape will be treated as failed. 0 means no limit.
+[ label_limit: <int> | default = 0 ]
+
+# Per-scrape limit on length of labels name that will be accepted for a sample.
+# If a label name is longer than this number post metric-relabeling, the entire
+# scrape will be treated as failed. 0 means no limit.
+[ label_name_length_limit: <int> | default = 0 ]
+
+# Per-scrape limit on length of labels value that will be accepted for a sample.
+# If a label value is longer than this number post metric-relabeling, the
+# entire scrape will be treated as failed. 0 means no limit.
+[ label_value_length_limit: <int> | default = 0 ]
+
 # Per-scrape config limit on number of unique targets that will be
 # accepted. If more than this number of targets are present after target
 # relabeling, Prometheus will mark the targets as failed without scraping them.

@@ -309,6 +333,32 @@ A `tls_config` allows configuring TLS connections.
 [ insecure_skip_verify: <boolean> ]
 ```
 
+### `<oauth2>`
+
+OAuth 2.0 authentication using the client credentials grant type.
+Prometheus fetches an access token from the specified endpoint with
+the given client access and secret keys.
+
+```yaml
+client_id: <string>
+[ client_secret: <secret> ]
+
+# Read the client secret from a file.
+# It is mutually exclusive with `client_secret`.
+[ client_secret_file: <filename> ]
+
+# Scopes for the token request.
+scopes:
+  [ - <string> ... ]
+
+# The URL to fetch the token from.
+token_url: <string>
+
+# Optional parameters to append to the token URL.
+endpoint_params:
+  [ <string>: <string> ... ]
+```
+
 ### `<azure_sd_config>`
 
 Azure SD configurations allow retrieving scrape targets from Azure VMs.
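For orientation, the `<oauth2>` block above drives a standard client credentials token exchange. A hedged sketch of that flow using golang.org/x/oauth2/clientcredentials, with the placeholder endpoint from the test fixture earlier in this diff (the scope and endpoint parameter values are invented):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	cfg := clientcredentials.Config{
		ClientID:     "123",                 // client_id
		ClientSecret: "456",                 // client_secret
		TokenURL:     "http://remote1/auth", // token_url
		Scopes:       []string{"read"},      // scopes (invented)
		EndpointParams: url.Values{ // endpoint_params (invented)
			"audience": {"prometheus"},
		},
	}

	// Fetch an access token with the client credentials grant; Prometheus
	// then attaches the resulting bearer token to its HTTP requests.
	tok, err := cfg.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.AccessToken)
}
```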
@@ -473,6 +523,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -565,6 +620,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configure whether HTTP requests follow HTTP 3xx redirects.
 [ follow_redirects: <bool> | default = true ]

@@ -723,6 +783,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configure whether HTTP requests follow HTTP 3xx redirects.
 [ follow_redirects: <bool> | default = true ]

@@ -1131,6 +1196,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -1302,6 +1372,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Optional proxy URL.
 [ proxy_url: <string> ]

@@ -1341,6 +1416,54 @@ for a detailed example of configuring Prometheus for Kubernetes.
 You may wish to check out the 3rd party [Prometheus Operator](https://github.com/coreos/prometheus-operator),
 which automates the Prometheus setup on top of Kubernetes.
 
+### `<lightsail_sd_config>`
+
+Lightsail SD configurations allow retrieving scrape targets from [AWS Lightsail](https://aws.amazon.com/lightsail/)
+instances. The private IP address is used by default, but may be changed to
+the public IP address with relabeling.
+
+The following meta labels are available on targets during [relabeling](#relabel_config):
+
+* `__meta_lightsail_availability_zone`: the availability zone in which the instance is running
+* `__meta_lightsail_blueprint_id`: the Lightsail blueprint ID
+* `__meta_lightsail_bundle_id`: the Lightsail bundle ID
+* `__meta_lightsail_instance_name`: the name of the Lightsail instance
+* `__meta_lightsail_instance_state`: the state of the Lightsail instance
+* `__meta_lightsail_instance_support_code`: the support code of the Lightsail instance
+* `__meta_lightsail_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
+* `__meta_lightsail_private_ip`: the private IP address of the instance
+* `__meta_lightsail_public_ip`: the public IP address of the instance, if available
+* `__meta_lightsail_tag_<tagkey>`: each tag value of the instance
+
+See below for the configuration options for Lightsail discovery:
+
+```yaml
+# The information to access the Lightsail API.
+
+# The AWS region. If blank, the region from the instance metadata is used.
+[ region: <string> ]
+
+# Custom endpoint to be used.
+[ endpoint: <string> ]
+
+# The AWS API keys. If blank, the environment variables `AWS_ACCESS_KEY_ID`
+# and `AWS_SECRET_ACCESS_KEY` are used.
+[ access_key: <string> ]
+[ secret_key: <secret> ]
+
+# Named AWS profile used to connect to the API.
+[ profile: <string> ]
+
+# AWS Role ARN, an alternative to using AWS API keys.
+[ role_arn: <string> ]
+
+# Refresh interval to re-read the instance list.
+[ refresh_interval: <duration> | default = 60s ]
+
+# The port to scrape metrics from. If using the public IP address, this must
+# instead be specified in the relabeling rule.
+[ port: <int> | default = 80 ]
+```
+
 ### `<marathon_sd_config>`
 
 Marathon SD configurations allow retrieving scrape targets using the
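As the Lightsail section above says, the default `__address__` is the private IP plus the configured port; moving a target to its public IP is a single relabel rule. A sketch against Prometheus's own pkg/relabel API as of this commit (label values invented):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/relabel"
)

func main() {
	// A target roughly as the Lightsail SD would emit it (values invented).
	target := labels.FromMap(map[string]string{
		model.AddressLabel:           "172.26.1.10:80", // private IP by default
		"__meta_lightsail_public_ip": "203.0.113.5",
	})

	// Rewrite __address__ from the public-IP meta label, adding the port
	// explicitly, as the `port` comment above requires.
	cfg := &relabel.Config{
		SourceLabels: model.LabelNames{"__meta_lightsail_public_ip"},
		Regex:        relabel.MustNewRegexp("(.+)"),
		TargetLabel:  model.AddressLabel,
		Replacement:  "$1:80",
		Action:       relabel.Replace,
	}

	out := relabel.Process(target, cfg)
	fmt.Println(out.Get(model.AddressLabel)) // 203.0.113.5:80
}
```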
@@ -1402,6 +1525,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configure whether HTTP requests follow HTTP 3xx redirects.
 [ follow_redirects: <bool> | default = true ]

@@ -1600,6 +1728,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configures the scrape request's TLS settings.
 tls_config:
   [ <tls_config> ]

@@ -1878,6 +2011,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configures the scrape request's TLS settings.
 tls_config:
   [ <tls_config> ]

@@ -1936,6 +2074,10 @@ hetzner_sd_configs:
 kubernetes_sd_configs:
   [ - <kubernetes_sd_config> ... ]
 
+# List of Lightsail service discovery configurations.
+lightsail_sd_configs:
+  [ - <lightsail_sd_config> ... ]
+
 # List of Marathon service discovery configurations.
 marathon_sd_configs:
   [ - <marathon_sd_config> ... ]

@@ -1999,6 +2141,9 @@ write_relabel_configs:
 # remote write configs.
 [ name: <string> ]
 
+# Enables sending of exemplars over remote write. Note that exemplar storage itself must be enabled for exemplars to be scraped in the first place.
+[ send_exemplars: <boolean> | default = false ]
+
 # Sets the `Authorization` header on every remote write request with the
 # configured username and password.
 # password and password_file are mutually exclusive.

@@ -2019,7 +2164,7 @@ authorization:
   [ credentials_file: <filename> ]
 
 # Optionally configures AWS's Signature Verification 4 signing process to
-# sign requests. Cannot be set at the same time as basic_auth or authorization.
+# sign requests. Cannot be set at the same time as basic_auth, authorization, or oauth2.
 # To use the default credentials from the AWS SDK, use `sigv4: {}`.
 sigv4:
   # The AWS region. If blank, the region from the default credentials chain

@@ -2037,6 +2182,11 @@ sigv4:
   # AWS Role ARN, an alternative to using AWS API keys.
   [ role_arn: <string> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth, authorization, or sigv4.
+oauth2:
+  [ <oauth2> ]
+
 # Configures the remote write request's TLS settings.
 tls_config:
   [ <tls_config> ]

@@ -2131,6 +2281,11 @@ authorization:
   # It is mutually exclusive with `credentials`.
   [ credentials_file: <filename> ]
 
+# Optional OAuth 2.0 configuration.
+# Cannot be used at the same time as basic_auth or authorization.
+oauth2:
+  [ <oauth2> ]
+
 # Configures the remote read request's TLS settings.
 tls_config:
   [ <tls_config> ]


@@ -52,5 +52,4 @@ The remote write receiver allows Prometheus to accept remote write requests from
 
 [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces.
 
-Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=<jaeger-trace-id>` uses roughly 100 bytes of memory via the in-memory exemplar storage.
+Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=<jaeger-trace-id>` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration).
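The buffer named here behaves like any fixed-size ring: once full, each new exemplar overwrites the oldest slot, which is what keeps memory bounded at `storage.exemplars.exemplars-limit` entries. A toy sketch of that shape (all names invented; this is not the actual tsdb implementation):

```go
package main

import "fmt"

type exemplar struct {
	traceID string
	value   float64
	ts      int64
}

// ring is a fixed-size circular buffer: add overwrites the oldest entry
// once the configured limit has been reached.
type ring struct {
	buf  []exemplar
	next int // index the next add will write to
}

func newRing(limit int) *ring { return &ring{buf: make([]exemplar, limit)} }

func (r *ring) add(e exemplar) {
	r.buf[r.next] = e
	r.next = (r.next + 1) % len(r.buf) // wrap around at the limit
}

func main() {
	r := newRing(2)
	r.add(exemplar{traceID: "a", value: 1, ts: 100})
	r.add(exemplar{traceID: "b", value: 2, ts: 200})
	r.add(exemplar{traceID: "c", value: 3, ts: 300}) // evicts "a"
	fmt.Println(r.buf)
}
```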


@@ -186,4 +186,4 @@ The output of `promtool tsdb create-blocks-from rules` command is a directory th
 - All rules in the recording rule files will be evaluated.
 - If the `interval` is set in the recording rule file that will take priority over the `eval-interval` flag in the rule backfill command.
 - Alerts are currently ignored if they are in the recording rule file.
-- Rules in the same group cannot see the results of previous rules. Meaning that rules that refer to other rules being backfilled is not supported. A workaround is to backfill mulitple times and create the dependent data first (and move dependent data to the Prometheus server data dir so that it is accessible from the Prometheus API).
+- Rules in the same group cannot see the results of previous rules. Meaning that rules that refer to other rules being backfilled is not supported. A workaround is to backfill multiple times and create the dependent data first (and move dependent data to the Prometheus server data dir so that it is accessible from the Prometheus API).


@@ -39,7 +39,15 @@ func main() {
             fmt.Println(m)
 
             for _, s := range ts.Samples {
-                fmt.Printf(" %f %d\n", s.Value, s.Timestamp)
+                fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
             }
+
+            for _, e := range ts.Exemplars {
+                m := make(model.Metric, len(e.Labels))
+                for _, l := range e.Labels {
+                    m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
+                }
+                fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
+            }
         }
     })


@@ -261,6 +261,20 @@
         description: 'Prometheus %(prometheusName)s has dropped {{ printf "%%.0f" $value }} targets because the number of targets exceeded the configured target_limit.' % $._config,
       },
     },
+    {
+      alert: 'PrometheusLabelLimitHit',
+      expr: |||
+        increase(prometheus_target_scrape_pool_exceeded_label_limits_total{%(prometheusSelector)s}[5m]) > 0
+      ||| % $._config,
+      'for': '15m',
+      labels: {
+        severity: 'warning',
+      },
+      annotations: {
+        summary: 'Prometheus has dropped targets because some scrape configs have exceeded the labels limit.',
+        description: 'Prometheus %(prometheusName)s has dropped {{ printf "%%.0f" $value }} targets because some samples exceeded the configured label_limit, label_name_length_limit or label_value_length_limit.' % $._config,
+      },
+    },
   ] + if $._config.prometheusHAGroupLabels == '' then self.rulesWithoutHA else self.rulesWithHA,
 
   rulesWithoutHA:: [
     {

go.mod

@@ -3,7 +3,7 @@ module github.com/prometheus/prometheus
 go 1.14
 
 require (
-    github.com/Azure/azure-sdk-for-go v52.5.0+incompatible
+    github.com/Azure/azure-sdk-for-go v54.0.0+incompatible
     github.com/Azure/go-autorest/autorest v0.11.18
     github.com/Azure/go-autorest/autorest/adal v0.9.13
     github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect

@@ -11,29 +11,29 @@ require (
     github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
     github.com/Microsoft/go-winio v0.4.16 // indirect
     github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
-    github.com/aws/aws-sdk-go v1.38.3
+    github.com/aws/aws-sdk-go v1.38.35
     github.com/cespare/xxhash/v2 v2.1.1
     github.com/containerd/containerd v1.4.3 // indirect
     github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
-    github.com/digitalocean/godo v1.58.0
+    github.com/digitalocean/godo v1.60.0
     github.com/docker/distribution v2.7.1+incompatible // indirect
-    github.com/docker/docker v20.10.5+incompatible
+    github.com/docker/docker v20.10.6+incompatible
     github.com/docker/go-connections v0.4.0 // indirect
     github.com/edsrzf/mmap-go v1.0.0
     github.com/go-kit/kit v0.10.0
     github.com/go-logfmt/logfmt v0.5.0
-    github.com/go-openapi/strfmt v0.20.0
+    github.com/go-openapi/strfmt v0.20.1
     github.com/go-openapi/validate v0.20.2 // indirect
     github.com/go-zookeeper/zk v1.0.2
     github.com/gogo/protobuf v1.3.2
     github.com/golang/snappy v0.0.3
-    github.com/google/pprof v0.0.0-20210323184331-8eee2492667d
+    github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10
-    github.com/gophercloud/gophercloud v0.16.0
+    github.com/gophercloud/gophercloud v0.17.0
     github.com/grpc-ecosystem/grpc-gateway v1.16.0
     github.com/hashicorp/consul/api v1.8.1
-    github.com/hetznercloud/hcloud-go v1.24.0
+    github.com/hetznercloud/hcloud-go v1.25.0
-    github.com/influxdata/influxdb v1.8.4
+    github.com/influxdata/influxdb v1.8.5
-    github.com/json-iterator/go v1.1.10
+    github.com/json-iterator/go v1.1.11
     github.com/miekg/dns v1.1.41
     github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
     github.com/morikuni/aec v1.0.0 // indirect

@@ -48,23 +48,23 @@ require (
     github.com/prometheus/alertmanager v0.21.0
     github.com/prometheus/client_golang v1.10.0
     github.com/prometheus/client_model v0.2.0
-    github.com/prometheus/common v0.21.0
+    github.com/prometheus/common v0.23.0
     github.com/prometheus/exporter-toolkit v0.5.1
     github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
     github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
     github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
     github.com/stretchr/testify v1.7.0
-    github.com/uber/jaeger-client-go v2.25.0+incompatible
+    github.com/uber/jaeger-client-go v2.28.0+incompatible
-    github.com/uber/jaeger-lib v2.4.0+incompatible
+    github.com/uber/jaeger-lib v2.4.1+incompatible
     go.uber.org/atomic v1.7.0
     go.uber.org/goleak v1.1.10
-    golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826
+    golang.org/x/net v0.0.0-20210505214959-0714010a04ed
-    golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558
+    golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c
     golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-    golang.org/x/sys v0.0.0-20210324051608-47abb6519492
+    golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6
     golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
     golang.org/x/tools v0.1.0
-    google.golang.org/api v0.42.0
+    google.golang.org/api v0.46.0
     gopkg.in/alecthomas/kingpin.v2 v2.2.6
     gopkg.in/fsnotify/fsnotify.v1 v1.4.7
     gopkg.in/yaml.v2 v2.4.0

go.sum

@@ -17,8 +17,9 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0 h1:oqqswrt4x6b9OGBnNqdssxBl1xf0rSUNjU2BR4BZar0=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=

@@ -39,8 +40,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v52.5.0+incompatible h1:/NLBWHCnIHtZyLPc1P7WIqi4Te4CC23kIQyK3Ep/7lA=
+github.com/Azure/azure-sdk-for-go v54.0.0+incompatible h1:Bq3L9LF0DHCexlT0fccwxgrOMfjHx8LGz+d+L7gGQv4=
-github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v54.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=

@@ -113,8 +114,8 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
-github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k=
+github.com/aws/aws-sdk-go v1.38.35 h1:7AlAO0FC+8nFjxiGKEmq0QLpiA8/XFr6eIxgRTwkdTg=
-github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -157,17 +158,18 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
 github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.58.0 h1:Iy8ULTvgCAxH8dlxZ54qRYpm5uTEb2deUqijywLH7Lo=
+github.com/digitalocean/godo v1.60.0 h1:o/vimtn/HKtYSakFAAZ59Zc5ASORd41S4z1X7pAXPn8=
-github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/digitalocean/godo v1.60.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.5+incompatible h1:o5WL5onN4awYGwrW7+oTn5x9AF2prw7V0Ox8ZEkoCdg=
+github.com/docker/docker v20.10.6+incompatible h1:oXI3Vas8TI8Eu/EjH4srKHJBVqraSzJybhxY7Om9faQ=
-github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=

@@ -189,6 +191,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@ -289,8 +292,9 @@ github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6
github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM=
github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc=
github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@ -380,8 +384,11 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
@ -421,8 +428,8 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210323184331-8eee2492667d h1:Rwivyny4wymF1qWzOk800eSVa/n9njfdOm+kHjiQhZQ= github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10 h1:wAh7XxYU1O92WP9JMsK0elU+haxEN0HTc2m/C89wQvk=
github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210504235042-3a04a4d88a10/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -433,8 +440,8 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU= github.com/gophercloud/gophercloud v0.17.0 h1:BgVw0saxyeHWH5us/SQe1ltp0GRnytjmOLXDA8pO77E=
github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gophercloud/gophercloud v0.17.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -491,8 +498,8 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hetznercloud/hcloud-go v1.24.0 h1:/CeHDzhH3Fhm83pjxvE3xNNLbvACl0Lu1/auJ83gG5U= github.com/hetznercloud/hcloud-go v1.25.0 h1:QAaFKtGKWRxjwjKJWBGMxGYUxVEQmIkb35j/WXrsazY=
github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA= github.com/hetznercloud/hcloud-go v1.25.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
@ -501,8 +508,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/influxdb v1.8.4 h1:FUcPJJ1/sM47gX3Xr7QCIbkmZ2N4ug5CV7iOFQCR75A= github.com/influxdata/influxdb v1.8.5 h1:MMrJF6eWCD7sWMxOFFctm1U1RNQ2Hh2nHgz2iF9/wHY=
github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb v1.8.5/go.mod h1:oFH+pbEyDln/1TKwa98oJzVrkZwdjrJOwIDGYZj7Ma0=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
@ -526,8 +533,9 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
@ -722,8 +730,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.21.0 h1:SMvI2JVldvfUvRVlP64jkIJEC6WiGHJcN2e5tB+ztF8= github.com/prometheus/common v0.23.0 h1:GXWvPYuTUenIa+BhOq/x+L/QZzCqASkVRny5KTlPDGM=
github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
github.com/prometheus/exporter-toolkit v0.5.1 h1:9eqgis5er9xN613ZSADjypCJaDGj9ZlcWBvsIHa8/3c= github.com/prometheus/exporter-toolkit v0.5.1 h1:9eqgis5er9xN613ZSADjypCJaDGj9ZlcWBvsIHa8/3c=
github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -802,19 +810,23 @@ github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI=
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.0+incompatible h1:fY7QsGQWiCt8pajv4r7JEvmATdCVaWxXbjwyYwsNaLQ= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -828,8 +840,9 @@ go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS
go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
go.mongodb.org/mongo-driver v1.4.6 h1:rh7GdYmDrb8AQSkF8yteAus8qYOgOASWDOv1BWqBXkU=
go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=
go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@ -865,6 +878,7 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
@ -957,8 +971,10 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826 h1:lNRDRnwZWawoPHDS50ebYHTOHjctRMLSrUSQFcAHiW4= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210505214959-0714010a04ed h1:V9kAVxLvz1lkufatrpHuUVyJ/5tR3Ms7rk951P4mI98=
golang.org/x/net v0.0.0-20210505214959-0714010a04ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -970,8 +986,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558 h1:D7nTwh4J0i+5mW4Zjzn5omvlr6YBcWywE6KOcatyNxY= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c h1:SgVl/sCtkicsS7psKkje4H9YtjdEl3xsYh7N+5TDHqY=
golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1050,10 +1066,12 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6 h1:cdsMqa2nXzqlgs183pHxtvoVwU7CyzaCTAUOg94af4c=
golang.org/x/sys v0.0.0-20210503173754-0981d6026fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@ -1064,8 +1082,9 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1170,8 +1189,9 @@ google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.42.0 h1:uqATLkpxiBrhrvFoebXUjvyzE9nQf+pVyy0Z0IHE+fc= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= google.golang.org/api v0.46.0 h1:jkDWHOBIoNSD0OQpq4rtBVu+Rh325MPjXG1rakAp8JU=
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1220,8 +1240,10 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f h1:YRBxgxUW6GFi+AKsn8WGA9k1SZohK+gGuEqdeT5aoNQ= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab h1:dkb90hr43A2Q5as5ZBphcOF2II0+EqfCBqGp7qFSpN4=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1243,8 +1265,10 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1254,8 +1278,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -15,12 +15,16 @@ package exemplar
import "github.com/prometheus/prometheus/pkg/labels" import "github.com/prometheus/prometheus/pkg/labels"
// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters, per the OpenMetrics specification:
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
const ExemplarMaxLabelSetLength = 128
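
This cap comes straight from the OpenMetrics exemplar rules, and Prometheus checks exemplars against it at ingestion time. A minimal sketch of such a check, using only the packages shown here (validLabelSet is a hypothetical helper, not the actual ingestion path):

package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/prometheus/prometheus/pkg/exemplar"
	"github.com/prometheus/prometheus/pkg/labels"
)

// validLabelSet sums the rune count of every label name and value and
// compares the total against the 128-character OpenMetrics cap.
func validLabelSet(e exemplar.Exemplar) bool {
	total := 0
	for _, l := range e.Labels {
		total += utf8.RuneCountInString(l.Name) + utf8.RuneCountInString(l.Value)
	}
	return total <= exemplar.ExemplarMaxLabelSetLength
}

func main() {
	e := exemplar.Exemplar{Labels: labels.FromStrings("trace_id", "6f9a8e3d")}
	fmt.Println(validLabelSet(e)) // true: 8 + 8 = 16 characters, well under 128
}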
// Exemplar is additional information associated with a time series. // Exemplar is additional information associated with a time series.
type Exemplar struct { type Exemplar struct {
Labels labels.Labels Labels labels.Labels `json:"labels"`
Value float64 Value float64 `json:"value"`
Ts int64 `json:"timestamp"`
HasTs bool HasTs bool
Ts int64
} }
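
The struct also gains JSON tags (labels, value, timestamp) so exemplars can be serialized for the HTTP API; HasTs stays untagged. A sketch of what stock encoding/json would produce for the new shape, assuming labels.Labels marshals to an object of name/value pairs as it does elsewhere in this codebase:

e := exemplar.Exemplar{
	Labels: labels.FromStrings("trace_id", "6f9a8e3d"),
	Value:  0.42,
	Ts:     1621234567000,
	HasTs:  true,
}
b, _ := json.Marshal(e) // import "encoding/json"
fmt.Println(string(b))
// {"labels":{"trace_id":"6f9a8e3d"},"value":0.42,"timestamp":1621234567000,"HasTs":true}
// (HasTs is untagged, so stock encoding/json includes it under its field name;
// API handlers may marshal exemplars differently.)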
type QueryResult struct { type QueryResult struct {


@ -96,7 +96,7 @@ func (x LabelMatcher_Type) String() string {
} }
func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{5, 0} return fileDescriptor_d938547f84707355, []int{6, 0}
} }
// We require this to match chunkenc.Encoding. // We require this to match chunkenc.Encoding.
@ -122,7 +122,7 @@ func (x Chunk_Encoding) String() string {
} }
func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{7, 0} return fileDescriptor_d938547f84707355, []int{8, 0}
} }
type MetricMetadata struct { type MetricMetadata struct {
@ -200,6 +200,8 @@ func (m *MetricMetadata) GetUnit() string {
type Sample struct { type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
// timestamp is in ms format; see pkg/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
@ -253,10 +255,79 @@ func (m *Sample) GetTimestamp() int64 {
return 0 return 0
} }
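
The pkg/timestamp package referenced in the comment above provides both directions of the conversion; a quick sketch (FromTime and Time are the helpers in that package):

import (
	"time"

	"github.com/prometheus/prometheus/pkg/timestamp"
)

ms := timestamp.FromTime(time.Now()) // time.Time -> int64 milliseconds since epoch
t := timestamp.Time(ms)              // int64 milliseconds -> time.Time, millisecond precision
_ = t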
type Exemplar struct {
// Optional, can be empty.
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
// timestamp is in ms format; see pkg/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Exemplar) Reset() { *m = Exemplar{} }
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
func (*Exemplar) ProtoMessage() {}
func (*Exemplar) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{2}
}
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Exemplar) XXX_Merge(src proto.Message) {
xxx_messageInfo_Exemplar.Merge(m, src)
}
func (m *Exemplar) XXX_Size() int {
return m.Size()
}
func (m *Exemplar) XXX_DiscardUnknown() {
xxx_messageInfo_Exemplar.DiscardUnknown(m)
}
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
func (m *Exemplar) GetLabels() []Label {
if m != nil {
return m.Labels
}
return nil
}
func (m *Exemplar) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func (m *Exemplar) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
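
Like every gogoproto-generated message in this file, the new Exemplar gets Marshal and Unmarshal methods for the wire format. A round-trip sketch, assuming the generated package is imported under its Go package name prompb:

in := prompb.Exemplar{
	Labels:    []prompb.Label{{Name: "trace_id", Value: "6f9a8e3d"}},
	Value:     0.42,
	Timestamp: 1621234567890,
}
b, err := in.Marshal() // generated; see MarshalToSizedBuffer below
if err != nil {
	panic(err)
}
var out prompb.Exemplar
if err := out.Unmarshal(b); err != nil {
	panic(err)
}
// out now equals in, modulo unknown fields.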
// TimeSeries represents samples and labels for a single time series. // TimeSeries represents samples and labels for a single time series.
type TimeSeries struct { type TimeSeries struct {
// For a timeseries to be valid, and for the samples and exemplars
// to be ingested by the remote system properly, the labels field is required.
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_sizecache int32 `json:"-"`
@ -266,7 +337,7 @@ func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) } func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { func (*TimeSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{2} return fileDescriptor_d938547f84707355, []int{3}
} }
func (m *TimeSeries) XXX_Unmarshal(b []byte) error { func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -309,6 +380,13 @@ func (m *TimeSeries) GetSamples() []Sample {
return nil return nil
} }
func (m *TimeSeries) GetExemplars() []Exemplar {
if m != nil {
return m.Exemplars
}
return nil
}
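
Putting the new field together: a remote-write payload can now carry exemplars alongside samples on the same series, provided the required labels are set. A sketch using the generated types above (again assuming the prompb import name):

ts := prompb.TimeSeries{
	// Labels are required for the samples and exemplars to be ingested.
	Labels: []prompb.Label{
		{Name: "__name__", Value: "http_requests_total"},
		{Name: "job", Value: "api"},
	},
	Samples: []prompb.Sample{{Value: 1027, Timestamp: 1621234567890}},
	Exemplars: []prompb.Exemplar{{
		Labels:    []prompb.Label{{Name: "trace_id", Value: "6f9a8e3d"}},
		Value:     0.42,
		Timestamp: 1621234567890,
	}},
}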
type Label struct { type Label struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
@ -321,7 +399,7 @@ func (m *Label) Reset() { *m = Label{} }
func (m *Label) String() string { return proto.CompactTextString(m) } func (m *Label) String() string { return proto.CompactTextString(m) }
func (*Label) ProtoMessage() {} func (*Label) ProtoMessage() {}
func (*Label) Descriptor() ([]byte, []int) { func (*Label) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{3} return fileDescriptor_d938547f84707355, []int{4}
} }
func (m *Label) XXX_Unmarshal(b []byte) error { func (m *Label) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -375,7 +453,7 @@ func (m *Labels) Reset() { *m = Labels{} }
func (m *Labels) String() string { return proto.CompactTextString(m) } func (m *Labels) String() string { return proto.CompactTextString(m) }
func (*Labels) ProtoMessage() {} func (*Labels) ProtoMessage() {}
func (*Labels) Descriptor() ([]byte, []int) { func (*Labels) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{4} return fileDescriptor_d938547f84707355, []int{5}
} }
func (m *Labels) XXX_Unmarshal(b []byte) error { func (m *Labels) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -425,7 +503,7 @@ func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { func (*LabelMatcher) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{5} return fileDescriptor_d938547f84707355, []int{6}
} }
func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { func (m *LabelMatcher) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -492,7 +570,7 @@ func (m *ReadHints) Reset() { *m = ReadHints{} }
func (m *ReadHints) String() string { return proto.CompactTextString(m) } func (m *ReadHints) String() string { return proto.CompactTextString(m) }
func (*ReadHints) ProtoMessage() {} func (*ReadHints) ProtoMessage() {}
func (*ReadHints) Descriptor() ([]byte, []int) { func (*ReadHints) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{6} return fileDescriptor_d938547f84707355, []int{7}
} }
func (m *ReadHints) XXX_Unmarshal(b []byte) error { func (m *ReadHints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -586,7 +664,7 @@ func (m *Chunk) Reset() { *m = Chunk{} }
func (m *Chunk) String() string { return proto.CompactTextString(m) } func (m *Chunk) String() string { return proto.CompactTextString(m) }
func (*Chunk) ProtoMessage() {} func (*Chunk) ProtoMessage() {}
func (*Chunk) Descriptor() ([]byte, []int) { func (*Chunk) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{7} return fileDescriptor_d938547f84707355, []int{8}
} }
func (m *Chunk) XXX_Unmarshal(b []byte) error { func (m *Chunk) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -658,7 +736,7 @@ func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} }
func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) }
func (*ChunkedSeries) ProtoMessage() {} func (*ChunkedSeries) ProtoMessage() {}
func (*ChunkedSeries) Descriptor() ([]byte, []int) { func (*ChunkedSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_d938547f84707355, []int{8} return fileDescriptor_d938547f84707355, []int{9}
} }
func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b) return m.Unmarshal(b)
@ -707,6 +785,7 @@ func init() {
proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value)
proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata")
proto.RegisterType((*Sample)(nil), "prometheus.Sample") proto.RegisterType((*Sample)(nil), "prometheus.Sample")
proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar")
proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries")
proto.RegisterType((*Label)(nil), "prometheus.Label") proto.RegisterType((*Label)(nil), "prometheus.Label")
proto.RegisterType((*Labels)(nil), "prometheus.Labels") proto.RegisterType((*Labels)(nil), "prometheus.Labels")
@ -719,51 +798,53 @@ func init() {
func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) }
var fileDescriptor_d938547f84707355 = []byte{ var fileDescriptor_d938547f84707355 = []byte{
// 690 bytes of a gzipped FileDescriptorProto // 734 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcd, 0x6e, 0xda, 0x40, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xdb, 0x46,
0x10, 0xce, 0xfa, 0x17, 0x86, 0x04, 0x39, 0xab, 0x54, 0x75, 0xa3, 0x96, 0x22, 0x4b, 0x95, 0x38, 0x14, 0xf5, 0xf0, 0x29, 0x5e, 0xd9, 0x02, 0x3d, 0x50, 0x51, 0xd6, 0x68, 0x55, 0x81, 0x40, 0x01,
0x54, 0x44, 0x49, 0x4f, 0x91, 0x7a, 0x21, 0x91, 0xf3, 0xa3, 0xc6, 0xa0, 0x2c, 0xa0, 0xfe, 0x5c, 0x2d, 0x0a, 0x19, 0x76, 0x37, 0x35, 0xd0, 0x8d, 0x6c, 0xd0, 0x0f, 0xd4, 0x94, 0xe0, 0x91, 0x84,
0xd0, 0x02, 0x1b, 0xb0, 0x8a, 0x8d, 0xe3, 0x5d, 0xaa, 0xf0, 0x20, 0xbd, 0xf5, 0x15, 0x7a, 0xe8, 0x3e, 0x36, 0xc2, 0x48, 0x1a, 0x4b, 0x44, 0xc4, 0x47, 0x38, 0x54, 0x60, 0x7d, 0x48, 0x76, 0xf9,
0x5b, 0xe4, 0xd8, 0x27, 0xa8, 0xaa, 0x3c, 0x49, 0xb5, 0x6b, 0x13, 0x13, 0xa5, 0x97, 0xf6, 0x36, 0x83, 0x20, 0x8b, 0xfc, 0x85, 0x97, 0xf9, 0x82, 0x20, 0xf0, 0x97, 0x04, 0x33, 0xa4, 0x4c, 0x29,
0xf3, 0x7d, 0xdf, 0xfc, 0xec, 0xcc, 0xd8, 0x50, 0x11, 0xcb, 0x84, 0xf1, 0x66, 0x92, 0xce, 0xc5, 0x4e, 0x16, 0xce, 0xee, 0xde, 0x7b, 0xce, 0xb9, 0x8f, 0xb9, 0x97, 0x84, 0x6a, 0xb6, 0x4a, 0x18,
0x1c, 0x43, 0x92, 0xce, 0x23, 0x26, 0xa6, 0x6c, 0xc1, 0x77, 0x77, 0x26, 0xf3, 0xc9, 0x5c, 0xc1, 0x6f, 0x27, 0x69, 0x9c, 0xc5, 0x18, 0x92, 0x34, 0x0e, 0x59, 0x36, 0x67, 0x4b, 0x7e, 0x50, 0x9f,
0x7b, 0xd2, 0xca, 0x14, 0xde, 0x37, 0x0d, 0xaa, 0x01, 0x13, 0x69, 0x38, 0x0a, 0x98, 0xa0, 0x63, 0xc5, 0xb3, 0x58, 0x86, 0x0f, 0x85, 0x95, 0x33, 0xdc, 0x37, 0x0a, 0xd4, 0x7c, 0x96, 0xa5, 0xc1,
// (remaining gzipped protobuf descriptor bytes elided: machine-generated, not human-readable)
} }
func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
@ -857,6 +938,58 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil return len(dAtA) - i, nil
} }
func (m *Exemplar) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Timestamp != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
i--
dAtA[i] = 0x18
}
if m.Value != 0 {
i -= 8
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
i--
dAtA[i] = 0x11
}
if len(m.Labels) > 0 {
for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
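The generated marshaler writes fields back to front and hard-codes each field's tag byte. Those constants follow directly from the protobuf wire format, where a tag is the field number shifted left three bits OR'd with the wire type; a standalone sketch (not generated code) reproducing them:

	package main

	import "fmt"

	// Wire types: 0 = varint, 1 = fixed 64-bit, 2 = length-delimited.
	func tag(fieldNum, wireType uint64) byte { return byte(fieldNum<<3 | wireType) }

	func main() {
		fmt.Printf("%#x\n", tag(1, 2)) // 0xa:  Labels, length-delimited
		fmt.Printf("%#x\n", tag(2, 1)) // 0x11: Value, fixed 64-bit
		fmt.Printf("%#x\n", tag(3, 0)) // 0x18: Timestamp, varint
	}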
func (m *TimeSeries) Marshal() (dAtA []byte, err error) { func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
size := m.Size() size := m.Size()
dAtA = make([]byte, size) dAtA = make([]byte, size)
@ -881,6 +1014,20 @@ func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized) i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized)
} }
if len(m.Exemplars) > 0 {
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Samples) > 0 { if len(m.Samples) > 0 {
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
{ {
@ -1273,6 +1420,30 @@ func (m *Sample) Size() (n int) {
return n return n
} }
func (m *Exemplar) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Labels) > 0 {
for _, e := range m.Labels {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if m.Value != 0 {
n += 9
}
if m.Timestamp != 0 {
n += 1 + sovTypes(uint64(m.Timestamp))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
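Size mirrors the marshaler byte for byte: each label costs one tag byte plus a varint-encoded length plus the payload, the float value a fixed 9 bytes (tag plus 8), and the timestamp one tag byte plus its varint. sovTypes counts varint bytes; an equivalent helper, written out as a sketch:

	// Equivalent of the generated sovTypes: how many bytes the varint
	// encoding of x occupies (7 payload bits per byte).
	func sov(x uint64) (n int) {
		for {
			n++
			x >>= 7
			if x == 0 {
				return n
			}
		}
	}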
func (m *TimeSeries) Size() (n int) { func (m *TimeSeries) Size() (n int) {
if m == nil { if m == nil {
return 0 return 0
@ -1291,6 +1462,12 @@ func (m *TimeSeries) Size() (n int) {
n += 1 + l + sovTypes(uint64(l)) n += 1 + l + sovTypes(uint64(l))
} }
} }
if len(m.Exemplars) > 0 {
for _, e := range m.Exemplars {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -1697,6 +1874,121 @@ func (m *Sample) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *Exemplar) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Labels = append(m.Labels, Label{})
if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.Value = float64(math.Float64frombits(v))
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
m.Timestamp = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Timestamp |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
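The decode loops above invert that varint encoding: each byte contributes its low seven bits, and any byte below 0x80 ends the sequence. The same loop as a self-contained sketch:

	// decodeVarint sketches the loop the generated code inlines.
	// Example: decodeVarint([]byte{0x96, 0x01}) returns (150, 2).
	func decodeVarint(data []byte) (v uint64, n int) {
		for i, b := range data {
			v |= uint64(b&0x7F) << (7 * uint(i))
			if b < 0x80 {
				return v, i + 1
			}
		}
		return 0, 0 // truncated input
	}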
func (m *TimeSeries) Unmarshal(dAtA []byte) error { func (m *TimeSeries) Unmarshal(dAtA []byte) error {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0
@ -1794,6 +2086,40 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Exemplars = append(m.Exemplars, Exemplar{})
if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:]) skippy, err := skipTypes(dAtA[iNdEx:])

View File

@ -40,13 +40,27 @@ message MetricMetadata {
message Sample { message Sample {
double value = 1; double value = 1;
// timestamp is in ms format, see pkg/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 2; int64 timestamp = 2;
} }
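The "ms format" these comments reference is plain milliseconds since the Unix epoch; the helper in pkg/timestamp amounts to the following sketch:

	import "time"

	// fromTime sketches the pkg/timestamp conversion to Prometheus milliseconds.
	func fromTime(t time.Time) int64 {
		return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond)
	}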
message Exemplar {
// Optional, can be empty.
repeated Label labels = 1 [(gogoproto.nullable) = false];
double value = 2;
// timestamp is in ms format, see pkg/timestamp/timestamp.go for
// conversion from time.Time to Prometheus timestamp.
int64 timestamp = 3;
}
// TimeSeries represents samples and labels for a single time series. // TimeSeries represents samples and labels for a single time series.
message TimeSeries { message TimeSeries {
// For a timeseries to be valid, and for the samples and exemplars
// to be ingested by the remote system properly, the labels field is required.
repeated Label labels = 1 [(gogoproto.nullable) = false]; repeated Label labels = 1 [(gogoproto.nullable) = false];
repeated Sample samples = 2 [(gogoproto.nullable) = false]; repeated Sample samples = 2 [(gogoproto.nullable) = false];
repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
} }
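Putting the three messages together: a remote-write payload for one series with an attached exemplar could be assembled from the generated types roughly as below (a sketch; the prompb package name matches the generated code above, and the metric, job, and trace_id values are invented):

	ts := prompb.TimeSeries{
		Labels: []prompb.Label{
			{Name: "__name__", Value: "http_requests_total"},
			{Name: "job", Value: "api"},
		},
		// Timestamps are milliseconds since the Unix epoch, per the comments above.
		Samples: []prompb.Sample{{Value: 42, Timestamp: 1621234567000}},
		Exemplars: []prompb.Exemplar{{
			Labels:    []prompb.Label{{Name: "trace_id", Value: "abc123"}}, // optional, can be empty
			Value:     0.5,
			Timestamp: 1621234567000,
		}},
	}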
message Label { message Label {

View File

@ -703,7 +703,7 @@ func (p *parser) addOffset(e Node, offset time.Duration) {
orgoffsetp = &s.OriginalOffset orgoffsetp = &s.OriginalOffset
endPosp = &s.EndPos endPosp = &s.EndPos
default: default:
p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant selector vector or range vector selector or a subquery") p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery")
return return
} }
@ -778,7 +778,7 @@ func (p *parser) getAtModifierVars(e Node) (**int64, *ItemType, *Pos, bool) {
timestampp = &s.Timestamp timestampp = &s.Timestamp
endPosp = &s.EndPos endPosp = &s.EndPos
default: default:
p.addParseErrf(e.PositionRange(), "@ modifier must be preceded by an instant selector vector or range vector selector or a subquery") p.addParseErrf(e.PositionRange(), "@ modifier must be preceded by an instant vector selector or range vector selector or a subquery")
return nil, nil, nil, false return nil, nil, nil, false
} }

View File

@ -528,7 +528,7 @@ var testExpr = []struct {
}, { }, {
input: "1 offset 1d", input: "1 offset 1d",
fail: true, fail: true,
errMsg: "1:1: parse error: offset modifier must be preceded by an instant selector vector or range vector selector or a subquery", errMsg: "1:1: parse error: offset modifier must be preceded by an instant vector selector or range vector selector or a subquery",
}, { }, {
input: "foo offset 1s offset 2s", input: "foo offset 1s offset 2s",
fail: true, fail: true,
@ -2270,7 +2270,7 @@ var testExpr = []struct {
}, { }, {
input: `rate(some_metric[5m]) @ 1234`, input: `rate(some_metric[5m]) @ 1234`,
fail: true, fail: true,
errMsg: "1:1: parse error: @ modifier must be preceded by an instant selector vector or range vector selector or a subquery", errMsg: "1:1: parse error: @ modifier must be preceded by an instant vector selector or range vector selector or a subquery",
}, },
// Test function calls. // Test function calls.
{ {

View File

@ -26,6 +26,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/pkg/timestamp"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
@ -277,6 +278,7 @@ type loadCmd struct {
gap time.Duration gap time.Duration
metrics map[uint64]labels.Labels metrics map[uint64]labels.Labels
defs map[uint64][]Point defs map[uint64][]Point
exemplars map[uint64][]exemplar.Exemplar
} }
func newLoadCmd(gap time.Duration) *loadCmd { func newLoadCmd(gap time.Duration) *loadCmd {
@ -284,6 +286,7 @@ func newLoadCmd(gap time.Duration) *loadCmd {
gap: gap, gap: gap,
metrics: map[uint64]labels.Labels{}, metrics: map[uint64]labels.Labels{},
defs: map[uint64][]Point{}, defs: map[uint64][]Point{},
exemplars: map[uint64][]exemplar.Exemplar{},
} }
} }

View File

@ -53,37 +53,37 @@ const namespace = "prometheus"
// Metrics for rule evaluation. // Metrics for rule evaluation.
type Metrics struct { type Metrics struct {
evalDuration prometheus.Summary EvalDuration prometheus.Summary
iterationDuration prometheus.Summary IterationDuration prometheus.Summary
iterationsMissed *prometheus.CounterVec IterationsMissed *prometheus.CounterVec
iterationsScheduled *prometheus.CounterVec IterationsScheduled *prometheus.CounterVec
evalTotal *prometheus.CounterVec EvalTotal *prometheus.CounterVec
evalFailures *prometheus.CounterVec EvalFailures *prometheus.CounterVec
groupInterval *prometheus.GaugeVec GroupInterval *prometheus.GaugeVec
groupLastEvalTime *prometheus.GaugeVec GroupLastEvalTime *prometheus.GaugeVec
groupLastDuration *prometheus.GaugeVec GroupLastDuration *prometheus.GaugeVec
groupRules *prometheus.GaugeVec GroupRules *prometheus.GaugeVec
groupSamples *prometheus.GaugeVec GroupSamples *prometheus.GaugeVec
} }
// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer, // NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
// if not nil. // if not nil.
func NewGroupMetrics(reg prometheus.Registerer) *Metrics { func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m := &Metrics{ m := &Metrics{
evalDuration: prometheus.NewSummary( EvalDuration: prometheus.NewSummary(
prometheus.SummaryOpts{ prometheus.SummaryOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_evaluation_duration_seconds", Name: "rule_evaluation_duration_seconds",
Help: "The duration for a rule to execute.", Help: "The duration for a rule to execute.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}), }),
iterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{ IterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_duration_seconds", Name: "rule_group_duration_seconds",
Help: "The duration of rule group evaluations.", Help: "The duration of rule group evaluations.",
Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
}), }),
iterationsMissed: prometheus.NewCounterVec( IterationsMissed: prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_iterations_missed_total", Name: "rule_group_iterations_missed_total",
@ -91,7 +91,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
iterationsScheduled: prometheus.NewCounterVec( IterationsScheduled: prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_iterations_total", Name: "rule_group_iterations_total",
@ -99,7 +99,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
evalTotal: prometheus.NewCounterVec( EvalTotal: prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_evaluations_total", Name: "rule_evaluations_total",
@ -107,7 +107,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
evalFailures: prometheus.NewCounterVec( EvalFailures: prometheus.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_evaluation_failures_total", Name: "rule_evaluation_failures_total",
@ -115,7 +115,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
groupInterval: prometheus.NewGaugeVec( GroupInterval: prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_interval_seconds", Name: "rule_group_interval_seconds",
@ -123,7 +123,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
groupLastEvalTime: prometheus.NewGaugeVec( GroupLastEvalTime: prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_last_evaluation_timestamp_seconds", Name: "rule_group_last_evaluation_timestamp_seconds",
@ -131,7 +131,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
groupLastDuration: prometheus.NewGaugeVec( GroupLastDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_last_duration_seconds", Name: "rule_group_last_duration_seconds",
@ -139,7 +139,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
groupRules: prometheus.NewGaugeVec( GroupRules: prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_rules", Name: "rule_group_rules",
@ -147,7 +147,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
}, },
[]string{"rule_group"}, []string{"rule_group"},
), ),
groupSamples: prometheus.NewGaugeVec( GroupSamples: prometheus.NewGaugeVec(
prometheus.GaugeOpts{ prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
Name: "rule_group_last_evaluation_samples", Name: "rule_group_last_evaluation_samples",
@ -159,17 +159,17 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
if reg != nil { if reg != nil {
reg.MustRegister( reg.MustRegister(
m.evalDuration, m.EvalDuration,
m.iterationDuration, m.IterationDuration,
m.iterationsMissed, m.IterationsMissed,
m.iterationsScheduled, m.IterationsScheduled,
m.evalTotal, m.EvalTotal,
m.evalFailures, m.EvalFailures,
m.groupInterval, m.GroupInterval,
m.groupLastEvalTime, m.GroupLastEvalTime,
m.groupLastDuration, m.GroupLastDuration,
m.groupRules, m.GroupRules,
m.groupSamples, m.GroupSamples,
) )
} }
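Renaming these fields from unexported to exported is what lets code outside the rules package reach them; a hypothetical downstream use (the file and group names are invented):

	m := rules.NewGroupMetrics(prometheus.DefaultRegisterer)
	m.EvalFailures.WithLabelValues(rules.GroupKey("rules.yml", "my-group")).Inc()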
@ -281,15 +281,15 @@ func NewGroup(o GroupOptions) *Group {
} }
key := GroupKey(o.File, o.Name) key := GroupKey(o.File, o.Name)
metrics.iterationsMissed.WithLabelValues(key) metrics.IterationsMissed.WithLabelValues(key)
metrics.iterationsScheduled.WithLabelValues(key) metrics.IterationsScheduled.WithLabelValues(key)
metrics.evalTotal.WithLabelValues(key) metrics.EvalTotal.WithLabelValues(key)
metrics.evalFailures.WithLabelValues(key) metrics.EvalFailures.WithLabelValues(key)
metrics.groupLastEvalTime.WithLabelValues(key) metrics.GroupLastEvalTime.WithLabelValues(key)
metrics.groupLastDuration.WithLabelValues(key) metrics.GroupLastDuration.WithLabelValues(key)
metrics.groupRules.WithLabelValues(key).Set(float64(len(o.Rules))) metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
metrics.groupSamples.WithLabelValues(key) metrics.GroupSamples.WithLabelValues(key)
metrics.groupInterval.WithLabelValues(key).Set(o.Interval.Seconds()) metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
return &Group{ return &Group{
name: o.Name, name: o.Name,
@ -338,13 +338,13 @@ func (g *Group) run(ctx context.Context) {
}) })
iter := func() { iter := func() {
g.metrics.iterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc() g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Inc()
start := time.Now() start := time.Now()
g.Eval(ctx, evalTimestamp) g.Eval(ctx, evalTimestamp)
timeSinceStart := time.Since(start) timeSinceStart := time.Since(start)
g.metrics.iterationDuration.Observe(timeSinceStart.Seconds()) g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
g.setEvaluationTime(timeSinceStart) g.setEvaluationTime(timeSinceStart)
g.setLastEvaluation(start) g.setLastEvaluation(start)
} }
@ -390,8 +390,8 @@ func (g *Group) run(ctx context.Context) {
case <-tick.C: case <-tick.C:
missed := (time.Since(evalTimestamp) / g.interval) - 1 missed := (time.Since(evalTimestamp) / g.interval) - 1
if missed > 0 { if missed > 0 {
g.metrics.iterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
g.metrics.iterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
} }
evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
iter() iter()
@ -412,8 +412,8 @@ func (g *Group) run(ctx context.Context) {
case <-tick.C: case <-tick.C:
missed := (time.Since(evalTimestamp) / g.interval) - 1 missed := (time.Since(evalTimestamp) / g.interval) - 1
if missed > 0 { if missed > 0 {
g.metrics.iterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) g.metrics.IterationsMissed.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
g.metrics.iterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed)) g.metrics.IterationsScheduled.WithLabelValues(GroupKey(g.file, g.name)).Add(float64(missed))
} }
evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval) evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
iter() iter()
@ -476,7 +476,7 @@ func (g *Group) GetEvaluationTime() time.Duration {
// setEvaluationTime sets the time in seconds the last evaluation took. // setEvaluationTime sets the time in seconds the last evaluation took.
func (g *Group) setEvaluationTime(dur time.Duration) { func (g *Group) setEvaluationTime(dur time.Duration) {
g.metrics.groupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds()) g.metrics.GroupLastDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(dur.Seconds())
g.mtx.Lock() g.mtx.Lock()
defer g.mtx.Unlock() defer g.mtx.Unlock()
@ -492,7 +492,7 @@ func (g *Group) GetLastEvaluation() time.Time {
// setLastEvaluation updates evaluationTimestamp to the timestamp of when the rule group was last evaluated. // setLastEvaluation updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
func (g *Group) setLastEvaluation(ts time.Time) { func (g *Group) setLastEvaluation(ts time.Time) {
g.metrics.groupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9) g.metrics.GroupLastEvalTime.WithLabelValues(GroupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)
g.mtx.Lock() g.mtx.Lock()
defer g.mtx.Unlock() defer g.mtx.Unlock()
@ -584,24 +584,24 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
sp.Finish() sp.Finish()
since := time.Since(t) since := time.Since(t)
g.metrics.evalDuration.Observe(since.Seconds()) g.metrics.EvalDuration.Observe(since.Seconds())
rule.SetEvaluationDuration(since) rule.SetEvaluationDuration(since)
rule.SetEvaluationTimestamp(t) rule.SetEvaluationTimestamp(t)
}(time.Now()) }(time.Now())
g.metrics.evalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL) vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
if err != nil { if err != nil {
rule.SetHealth(HealthBad) rule.SetHealth(HealthBad)
rule.SetLastError(err) rule.SetLastError(err)
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
// Canceled queries are intentional termination of queries. This normally // Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here. // happens on shutdown and thus we skip logging of any errors here.
if _, ok := err.(promql.ErrQueryCanceled); !ok { if _, ok := err.(promql.ErrQueryCanceled); !ok {
level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err)
} }
g.metrics.evalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
return return
} }
samplesTotal += float64(len(vector)) samplesTotal += float64(len(vector))
@ -620,6 +620,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
if err := app.Commit(); err != nil { if err := app.Commit(); err != nil {
rule.SetHealth(HealthBad) rule.SetHealth(HealthBad)
rule.SetLastError(err) rule.SetLastError(err)
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err) level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err)
return return
@ -670,7 +671,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
}(i, rule) }(i, rule)
} }
if g.metrics != nil { if g.metrics != nil {
g.metrics.groupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal) g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal)
} }
g.cleanupStaleSeries(ctx, ts) g.cleanupStaleSeries(ctx, ts)
} }
@ -995,15 +996,15 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels
g.markStale = true g.markStale = true
g.stop() g.stop()
if m := g.metrics; m != nil { if m := g.metrics; m != nil {
m.iterationsMissed.DeleteLabelValues(n) m.IterationsMissed.DeleteLabelValues(n)
m.iterationsScheduled.DeleteLabelValues(n) m.IterationsScheduled.DeleteLabelValues(n)
m.evalTotal.DeleteLabelValues(n) m.EvalTotal.DeleteLabelValues(n)
m.evalFailures.DeleteLabelValues(n) m.EvalFailures.DeleteLabelValues(n)
m.groupInterval.DeleteLabelValues(n) m.GroupInterval.DeleteLabelValues(n)
m.groupLastEvalTime.DeleteLabelValues(n) m.GroupLastEvalTime.DeleteLabelValues(n)
m.groupLastDuration.DeleteLabelValues(n) m.GroupLastDuration.DeleteLabelValues(n)
m.groupRules.DeleteLabelValues(n) m.GroupRules.DeleteLabelValues(n)
m.groupSamples.DeleteLabelValues((n)) m.GroupSamples.DeleteLabelValues((n))
} }
wg.Done() wg.Done()
}(n, oldg) }(n, oldg)

View File

@ -170,6 +170,12 @@ var (
Help: "Total number of exemplar rejected due to not being out of the expected order.", Help: "Total number of exemplar rejected due to not being out of the expected order.",
}, },
) )
targetScrapePoolExceededLabelLimits = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_target_scrape_pool_exceeded_label_limits_total",
Help: "Total number of times scrape pools hit the label limits, during sync or config reload.",
},
)
) )
func init() { func init() {
@ -192,6 +198,7 @@ func init() {
targetScrapeCacheFlushForced, targetScrapeCacheFlushForced,
targetMetadataCache, targetMetadataCache,
targetScrapeExemplarOutOfOrder, targetScrapeExemplarOutOfOrder,
targetScrapePoolExceededLabelLimits,
) )
} }
@ -218,10 +225,17 @@ type scrapePool struct {
newLoop func(scrapeLoopOptions) loop newLoop func(scrapeLoopOptions) loop
} }
type labelLimits struct {
labelLimit int
labelNameLengthLimit int
labelValueLengthLimit int
}
type scrapeLoopOptions struct { type scrapeLoopOptions struct {
target *Target target *Target
scraper scraper scraper scraper
limit int sampleLimit int
labelLimits *labelLimits
honorLabels bool honorLabels bool
honorTimestamps bool honorTimestamps bool
mrc []*relabel.Config mrc []*relabel.Config
@ -273,10 +287,11 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed
return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc)
}, },
func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) },
func(ctx context.Context) storage.Appender { return appender(app.Appender(ctx), opts.limit) }, func(ctx context.Context) storage.Appender { return appender(app.Appender(ctx), opts.sampleLimit) },
cache, cache,
jitterSeed, jitterSeed,
opts.honorTimestamps, opts.honorTimestamps,
opts.labelLimits,
) )
} }
@ -360,7 +375,12 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
wg sync.WaitGroup wg sync.WaitGroup
interval = time.Duration(sp.config.ScrapeInterval) interval = time.Duration(sp.config.ScrapeInterval)
timeout = time.Duration(sp.config.ScrapeTimeout) timeout = time.Duration(sp.config.ScrapeTimeout)
limit = int(sp.config.SampleLimit) sampleLimit = int(sp.config.SampleLimit)
labelLimits = &labelLimits{
labelLimit: int(sp.config.LabelLimit),
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
}
honorLabels = sp.config.HonorLabels honorLabels = sp.config.HonorLabels
honorTimestamps = sp.config.HonorTimestamps honorTimestamps = sp.config.HonorTimestamps
mrc = sp.config.MetricRelabelConfigs mrc = sp.config.MetricRelabelConfigs
@ -383,7 +403,8 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
newLoop = sp.newLoop(scrapeLoopOptions{ newLoop = sp.newLoop(scrapeLoopOptions{
target: t, target: t,
scraper: s, scraper: s,
limit: limit, sampleLimit: sampleLimit,
labelLimits: labelLimits,
honorLabels: honorLabels, honorLabels: honorLabels,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
mrc: mrc, mrc: mrc,
@ -454,7 +475,12 @@ func (sp *scrapePool) sync(targets []*Target) {
uniqueLoops = make(map[uint64]loop) uniqueLoops = make(map[uint64]loop)
interval = time.Duration(sp.config.ScrapeInterval) interval = time.Duration(sp.config.ScrapeInterval)
timeout = time.Duration(sp.config.ScrapeTimeout) timeout = time.Duration(sp.config.ScrapeTimeout)
limit = int(sp.config.SampleLimit) sampleLimit = int(sp.config.SampleLimit)
labelLimits = &labelLimits{
labelLimit: int(sp.config.LabelLimit),
labelNameLengthLimit: int(sp.config.LabelNameLengthLimit),
labelValueLengthLimit: int(sp.config.LabelValueLengthLimit),
}
honorLabels = sp.config.HonorLabels honorLabels = sp.config.HonorLabels
honorTimestamps = sp.config.HonorTimestamps honorTimestamps = sp.config.HonorTimestamps
mrc = sp.config.MetricRelabelConfigs mrc = sp.config.MetricRelabelConfigs
@ -469,7 +495,8 @@ func (sp *scrapePool) sync(targets []*Target) {
l := sp.newLoop(scrapeLoopOptions{ l := sp.newLoop(scrapeLoopOptions{
target: t, target: t,
scraper: s, scraper: s,
limit: limit, sampleLimit: sampleLimit,
labelLimits: labelLimits,
honorLabels: honorLabels, honorLabels: honorLabels,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
mrc: mrc, mrc: mrc,
@ -544,6 +571,41 @@ func (sp *scrapePool) refreshTargetLimitErr() error {
return err return err
} }
func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error {
if limits == nil {
return nil
}
met := lset.Get(labels.MetricName)
if limits.labelLimit > 0 {
nbLabels := len(lset)
if nbLabels > int(limits.labelLimit) {
return fmt.Errorf("label_limit exceeded (metric: %.50s, number of label: %d, limit: %d)", met, nbLabels, limits.labelLimit)
}
}
if limits.labelNameLengthLimit == 0 && limits.labelValueLengthLimit == 0 {
return nil
}
for _, l := range lset {
if limits.labelNameLengthLimit > 0 {
nameLength := len(l.Name)
if nameLength > int(limits.labelNameLengthLimit) {
return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label: %.50v, name length: %d, limit: %d)", met, l, nameLength, limits.labelNameLengthLimit)
}
}
if limits.labelValueLengthLimit > 0 {
valueLength := len(l.Value)
if valueLength > int(limits.labelValueLengthLimit) {
return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label: %.50v, value length: %d, limit: %d)", met, l, valueLength, limits.labelValueLengthLimit)
}
}
}
return nil
}
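A package-internal sketch of how the new check behaves (the label set and limit are invented); note that the __name__ label counts toward the total:

	lset := labels.FromStrings("__name__", "up", "instance", "localhost:9090", "job", "node")
	err := verifyLabelLimits(lset, &labelLimits{labelLimit: 2})
	// err != nil: the set carries 3 labels, exceeding the configured label_limit of 2.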
func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels {
lb := labels.NewBuilder(lset) lb := labels.NewBuilder(lset)
@ -707,6 +769,7 @@ type scrapeLoop struct {
honorTimestamps bool honorTimestamps bool
forcedErr error forcedErr error
forcedErrMtx sync.Mutex forcedErrMtx sync.Mutex
labelLimits *labelLimits
appender func(ctx context.Context) storage.Appender appender func(ctx context.Context) storage.Appender
sampleMutator labelsMutator sampleMutator labelsMutator
@ -974,6 +1037,7 @@ func newScrapeLoop(ctx context.Context,
cache *scrapeCache, cache *scrapeCache,
jitterSeed uint64, jitterSeed uint64,
honorTimestamps bool, honorTimestamps bool,
labelLimits *labelLimits,
) *scrapeLoop { ) *scrapeLoop {
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
@ -996,6 +1060,7 @@ func newScrapeLoop(ctx context.Context,
l: l, l: l,
parentCtx: ctx, parentCtx: ctx,
honorTimestamps: honorTimestamps, honorTimestamps: honorTimestamps,
labelLimits: labelLimits,
} }
sl.ctx, sl.cancel = context.WithCancel(ctx) sl.ctx, sl.cancel = context.WithCancel(ctx)
@ -1346,6 +1411,12 @@ loop:
err = errNameLabelMandatory err = errNameLabelMandatory
break loop break loop
} }
// If any label limit is exceeded, the scrape should fail.
if err = verifyLabelLimits(lset, sl.labelLimits); err != nil {
targetScrapePoolExceededLabelLimits.Inc()
break loop
}
} }
ref, err = app.Append(ref, lset, t, v) ref, err = app.Append(ref, lset, t, v)
@ -1426,7 +1497,6 @@ func yoloString(b []byte) string {
// Adds samples to the appender, checking the error, and then returns the # of samples added, // Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample limit errors. // whether the caller should continue to process more samples, and any sample limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) {
switch errors.Cause(err) { switch errors.Cause(err) {
case nil: case nil:
@ -1577,6 +1647,9 @@ func zeroConfig(c *config.ScrapeConfig) *config.ScrapeConfig {
z.ScrapeInterval = 0 z.ScrapeInterval = 0
z.ScrapeTimeout = 0 z.ScrapeTimeout = 0
z.SampleLimit = 0 z.SampleLimit = 0
z.LabelLimit = 0
z.LabelNameLengthLimit = 0
z.LabelValueLengthLimit = 0
z.HTTPClientConfig = config_util.HTTPClientConfig{} z.HTTPClientConfig = config_util.HTTPClientConfig{}
return &z return &z
} }

View File

@ -465,7 +465,7 @@ func TestScrapePoolAppender(t *testing.T) {
loop = sp.newLoop(scrapeLoopOptions{ loop = sp.newLoop(scrapeLoopOptions{
target: &Target{}, target: &Target{},
limit: 100, sampleLimit: 100,
}) })
appl, ok = loop.(*scrapeLoop) appl, ok = loop.(*scrapeLoop)
require.True(t, ok, "Expected scrapeLoop but got %T", loop) require.True(t, ok, "Expected scrapeLoop but got %T", loop)
@ -577,6 +577,7 @@ func TestScrapeLoopStopBeforeRun(t *testing.T) {
nopMutator, nopMutator,
nil, nil, 0, nil, nil, 0,
true, true,
nil,
) )
// The scrape pool synchronizes on stopping scrape loops. However, new scrape // The scrape pool synchronizes on stopping scrape loops. However, new scrape
@ -641,6 +642,7 @@ func TestScrapeLoopStop(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
// Terminate loop after 2 scrapes. // Terminate loop after 2 scrapes.
@ -708,6 +710,7 @@ func TestScrapeLoopRun(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
// The loop must terminate during the initial offset if the context // The loop must terminate during the initial offset if the context
@ -755,6 +758,7 @@ func TestScrapeLoopRun(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
go func() { go func() {
@ -806,6 +810,7 @@ func TestScrapeLoopForcedErr(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
forcedErr := fmt.Errorf("forced err") forcedErr := fmt.Errorf("forced err")
@ -856,6 +861,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
cache, cache,
0, 0,
true, true,
nil,
) )
defer cancel() defer cancel()
@ -905,6 +911,7 @@ func TestScrapeLoopSeriesAdded(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
defer cancel() defer cancel()
@ -943,6 +950,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrape(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
// Succeed once, several failures, then stop. // Succeed once, several failures, then stop.
numScrapes := 0 numScrapes := 0
@ -997,6 +1005,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnParseFailure(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
// Succeed once, several failures, then stop. // Succeed once, several failures, then stop.
@ -1055,6 +1064,7 @@ func TestScrapeLoopCache(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
numScrapes := 0 numScrapes := 0
@ -1129,6 +1139,7 @@ func TestScrapeLoopCacheMemoryExhaustionProtection(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
numScrapes := 0 numScrapes := 0
@ -1235,6 +1246,7 @@ func TestScrapeLoopAppend(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -1276,6 +1288,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
fakeRef := uint64(1) fakeRef := uint64(1)
@ -1325,6 +1338,7 @@ func TestScrapeLoopAppendSampleLimit(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
// Get the value of the Counter before performing the append. // Get the value of the Counter before performing the append.
@ -1394,6 +1408,7 @@ func TestScrapeLoop_ChangingMetricString(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -1434,6 +1449,7 @@ func TestScrapeLoopAppendStaleness(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -1477,6 +1493,7 @@ func TestScrapeLoopAppendNoStalenessIfTimestamp(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -1578,6 +1595,7 @@ metric_total{n="2"} 2 # {t="2"} 2.0 20000
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -1635,6 +1653,7 @@ func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -1679,6 +1698,7 @@ func TestScrapeLoopRunReportsTargetDownOnScrapeError(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@ -1707,6 +1727,7 @@ func TestScrapeLoopRunReportsTargetDownOnInvalidUTF8(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error { scraper.scrapeFunc = func(ctx context.Context, w io.Writer) error {
@ -1748,6 +1769,7 @@ func TestScrapeLoopAppendGracefullyIfAmendOrOutOfOrderOrOutOfBounds(t *testing.T
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Unix(1, 0) now := time.Unix(1, 0)
@ -1785,6 +1807,7 @@ func TestScrapeLoopOutOfBoundsTimeError(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
now := time.Now().Add(20 * time.Minute) now := time.Now().Add(20 * time.Minute)
@ -1972,6 +1995,7 @@ func TestScrapeLoop_RespectTimestamps(t *testing.T) {
func(ctx context.Context) storage.Appender { return capp }, func(ctx context.Context) storage.Appender { return capp },
nil, 0, nil, 0,
true, true,
nil,
) )
now := time.Now() now := time.Now()
@ -2005,6 +2029,7 @@ func TestScrapeLoop_DiscardTimestamps(t *testing.T) {
func(ctx context.Context) storage.Appender { return capp }, func(ctx context.Context) storage.Appender { return capp },
nil, 0, nil, 0,
false, false,
nil,
) )
now := time.Now() now := time.Now()
@ -2037,6 +2062,7 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
defer cancel() defer cancel()
@ -2087,6 +2113,7 @@ func TestScrapeLoopDiscardUnnamedMetrics(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
defer cancel() defer cancel()
@ -2304,6 +2331,7 @@ func TestScrapeAddFast(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
defer cancel() defer cancel()
@ -2387,6 +2415,7 @@ func TestScrapeReportSingleAppender(t *testing.T) {
nil, nil,
0, 0,
true, true,
nil,
) )
numScrapes := 0 numScrapes := 0
@ -2430,3 +2459,103 @@ func TestScrapeReportSingleAppender(t *testing.T) {
t.Fatalf("Scrape wasn't stopped.") t.Fatalf("Scrape wasn't stopped.")
} }
} }
func TestScrapeLoopLabelLimit(t *testing.T) {
tests := []struct {
title string
scrapeLabels string
discoveryLabels []string
labelLimits labelLimits
expectErr bool
}{
{
title: "Valid number of labels",
scrapeLabels: `metric{l1="1", l2="2"} 0`,
discoveryLabels: nil,
labelLimits: labelLimits{labelLimit: 5},
expectErr: false,
}, {
title: "Too many labels",
scrapeLabels: `metric{l1="1", l2="2", l3="3", l4="4", l5="5", l6="6"} 0`,
discoveryLabels: nil,
labelLimits: labelLimits{labelLimit: 5},
expectErr: true,
}, {
title: "Too many labels including discovery labels",
scrapeLabels: `metric{l1="1", l2="2", l3="3", l4="4"} 0`,
discoveryLabels: []string{"l5", "5", "l6", "6"},
labelLimits: labelLimits{labelLimit: 5},
expectErr: true,
}, {
title: "Valid labels name length",
scrapeLabels: `metric{l1="1", l2="2"} 0`,
discoveryLabels: nil,
labelLimits: labelLimits{labelNameLengthLimit: 10},
expectErr: false,
}, {
title: "Label name too long",
scrapeLabels: `metric{label_name_too_long="0"} 0`,
discoveryLabels: nil,
labelLimits: labelLimits{labelNameLengthLimit: 10},
expectErr: true,
}, {
title: "Discovery label name too long",
scrapeLabels: `metric{l1="1", l2="2"} 0`,
discoveryLabels: []string{"label_name_too_long", "0"},
labelLimits: labelLimits{labelNameLengthLimit: 10},
expectErr: true,
}, {
title: "Valid labels value length",
scrapeLabels: `metric{l1="1", l2="2"} 0`,
discoveryLabels: nil,
labelLimits: labelLimits{labelValueLengthLimit: 10},
expectErr: false,
}, {
title: "Label value too long",
scrapeLabels: `metric{l1="label_value_too_long"} 0`,
discoveryLabels: nil,
labelLimits: labelLimits{labelValueLengthLimit: 10},
expectErr: true,
}, {
title: "Discovery label value too long",
scrapeLabels: `metric{l1="1", l2="2"} 0`,
discoveryLabels: []string{"l1", "label_value_too_long"},
labelLimits: labelLimits{labelValueLengthLimit: 10},
expectErr: true,
},
}
for _, test := range tests {
app := &collectResultAppender{}
discoveryLabels := &Target{
labels: labels.FromStrings(test.discoveryLabels...),
}
sl := newScrapeLoop(context.Background(),
nil, nil, nil,
func(l labels.Labels) labels.Labels {
return mutateSampleLabels(l, discoveryLabels, false, nil)
},
func(l labels.Labels) labels.Labels {
return mutateReportSampleLabels(l, discoveryLabels)
},
func(ctx context.Context) storage.Appender { return app },
nil,
0,
true,
&test.labelLimits,
)
slApp := sl.appender(context.Background())
_, _, _, err := sl.append(slApp, []byte(test.scrapeLabels), "", time.Now())
t.Logf("Test:%s", test.title)
if test.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
require.NoError(t, slApp.Commit())
}
}
}

View File

@ -10,8 +10,8 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255 exit 255
fi fi
if ! [[ $(protoc --version) =~ "3.12.3" ]]; then if ! [[ $(protoc --version) =~ "3.15.8" ]]; then
echo "could not find protoc 3.12.3, is it installed + in PATH?" echo "could not find protoc 3.15.8, is it installed + in PATH?"
exit 255 exit 255
fi fi

View File

@ -16,6 +16,7 @@ package storage
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/exemplar"
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
@ -30,6 +31,8 @@ var (
ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp")
ErrOutOfBounds = errors.New("out of bounds") ErrOutOfBounds = errors.New("out of bounds")
ErrOutOfOrderExemplar = errors.New("out of order exemplar") ErrOutOfOrderExemplar = errors.New("out of order exemplar")
ErrDuplicateExemplar = errors.New("duplicate exemplar")
ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
) )
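ErrExemplarLabelLength enforces the OpenMetrics rule that an exemplar's label set stays within ExemplarMaxLabelSetLength UTF-8 characters. A sketch of the kind of check that would return it, assuming (not shown in this diff) that the cap applies to the combined rune count of all label names and values:

	// Assumed shape of the validation, not verbatim source ("unicode/utf8" imported).
	func validateExemplarLabels(e exemplar.Exemplar) error {
		runes := 0
		for _, l := range e.Labels {
			runes += utf8.RuneCountInString(l.Name) + utf8.RuneCountInString(l.Value)
		}
		if runes > exemplar.ExemplarMaxLabelSetLength {
			return ErrExemplarLabelLength
		}
		return nil
	}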
// Appendable allows creating appenders. // Appendable allows creating appenders.
@ -121,7 +124,7 @@ type ExemplarQueryable interface {
ExemplarQuerier(ctx context.Context) (ExemplarQuerier, error) ExemplarQuerier(ctx context.Context) (ExemplarQuerier, error)
} }
// Querier provides reading access to time series data. // ExemplarQuerier provides reading access to exemplar data.
type ExemplarQuerier interface { type ExemplarQuerier interface {
// Select all the exemplars that match the matchers. // Select all the exemplars that match the matchers.
// Within a single slice of matchers, it is an intersection. Between the slices, it is a union. // Within a single slice of matchers, it is an intersection. Between the slices, it is a union.

View File

@ -667,7 +667,7 @@ func (c *compactChunkIterator) Next() bool {
} }
// Add last as it's not yet included in overlap. We operate on the same series, so labels do not matter here. // Add last as it's not yet included in overlap. We operate on the same series, so labels do not matter here.
iter = (&seriesToChunkEncoder{Series: c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)}).Iterator() iter = NewSeriesToChunkEncoder(c.mergeFunc(append(overlapping, newChunkToSeriesDecoder(nil, c.curr))...)).Iterator()
if !iter.Next() { if !iter.Next() {
if c.err = iter.Err(); c.err != nil { if c.err = iter.Err(); c.err != nil {
return false return false
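NewSeriesToChunkEncoder replaces direct construction of the unexported encoder at this call site. Its definition is not part of this diff; a thin exported constructor of roughly this shape would satisfy the call (an assumption):

	func NewSeriesToChunkEncoder(series Series) ChunkSeries {
		return &seriesToChunkEncoder{Series: series}
	}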

View File

@ -465,6 +465,27 @@ func TestCompactingChunkSeriesMerger(t *testing.T) {
[]tsdbutil.Sample{sample{31, 31}, sample{35, 35}}, []tsdbutil.Sample{sample{31, 31}, sample{35, 35}},
), ),
}, },
{
name: "110 overlapping",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 110)), // [0 - 110)
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 50)), // [60 - 110)
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
tsdbutil.GenerateSamples(0, 110),
),
},
{
name: "150 overlapping samples, split chunk",
input: []ChunkSeries{
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(0, 90)), // [0 - 90)
NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"), tsdbutil.GenerateSamples(60, 90)), // [60 - 150)
},
expected: NewListChunkSeriesFromSamples(labels.FromStrings("bar", "baz"),
tsdbutil.GenerateSamples(0, 120),
tsdbutil.GenerateSamples(120, 30),
),
},
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
merged := m(tc.input...) merged := m(tc.input...)

View File

@ -53,22 +53,27 @@ type queueManagerMetrics struct {
reg prometheus.Registerer reg prometheus.Registerer
samplesTotal prometheus.Counter samplesTotal prometheus.Counter
exemplarsTotal prometheus.Counter
metadataTotal prometheus.Counter metadataTotal prometheus.Counter
failedSamplesTotal prometheus.Counter failedSamplesTotal prometheus.Counter
failedExemplarsTotal prometheus.Counter
failedMetadataTotal prometheus.Counter failedMetadataTotal prometheus.Counter
retriedSamplesTotal prometheus.Counter retriedSamplesTotal prometheus.Counter
retriedExemplarsTotal prometheus.Counter
retriedMetadataTotal prometheus.Counter retriedMetadataTotal prometheus.Counter
droppedSamplesTotal prometheus.Counter droppedSamplesTotal prometheus.Counter
droppedExemplarsTotal prometheus.Counter
enqueueRetriesTotal prometheus.Counter enqueueRetriesTotal prometheus.Counter
sentBatchDuration prometheus.Histogram sentBatchDuration prometheus.Histogram
highestSentTimestamp *maxTimestamp highestSentTimestamp *maxTimestamp
pendingSamples prometheus.Gauge pendingSamples prometheus.Gauge
pendingExemplars prometheus.Gauge
shardCapacity prometheus.Gauge shardCapacity prometheus.Gauge
numShards prometheus.Gauge numShards prometheus.Gauge
maxNumShards prometheus.Gauge maxNumShards prometheus.Gauge
minNumShards prometheus.Gauge minNumShards prometheus.Gauge
desiredNumShards prometheus.Gauge desiredNumShards prometheus.Gauge
samplesBytesTotal prometheus.Counter sentBytesTotal prometheus.Counter
metadataBytesTotal prometheus.Counter metadataBytesTotal prometheus.Counter
maxSamplesPerSend prometheus.Gauge maxSamplesPerSend prometheus.Gauge
} }
@ -89,6 +94,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Help: "Total number of samples sent to remote storage.", Help: "Total number of samples sent to remote storage.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.exemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_total",
Help: "Total number of exemplars sent to remote storage.",
ConstLabels: constLabels,
})
m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
@ -103,6 +115,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.", Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.failedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_failed_total",
Help: "Total number of exemplars which failed on send to remote storage, non-recoverable errors.",
ConstLabels: constLabels,
})
m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
@ -117,6 +136,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.", Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.retriedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_retried_total",
Help: "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.",
ConstLabels: constLabels,
})
m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
@ -128,7 +154,14 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "samples_dropped_total", Name: "samples_dropped_total",
Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write.", Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
ConstLabels: constLabels,
})
m.droppedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_dropped_total",
Help: "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{ m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
@ -162,6 +195,13 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Help: "The number of samples pending in the queues shards to be sent to the remote storage.", Help: "The number of samples pending in the queues shards to be sent to the remote storage.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.pendingExemplars = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_pending",
Help: "The number of exemplars pending in the queues shards to be sent to the remote storage.",
ConstLabels: constLabels,
})
m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{ m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
@ -197,11 +237,11 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Help: "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. samples out.", Help: "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. samples out.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.samplesBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ m.sentBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "samples_bytes_total", Name: "bytes_total",
Help: "The total number of bytes of samples sent by the queue after compression.", Help: "The total number of bytes of data (not metadata) sent by the queue after compression. Note that when exemplars over remote write is enabled the exemplars included in a remote write request count towards this metric.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
m.metadataBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ m.metadataBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{
@ -215,7 +255,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "max_samples_per_send", Name: "max_samples_per_send",
Help: "The maximum number of samples to be sent, in a single request, to the remote storage.", Help: "The maximum number of samples to be sent, in a single request, to the remote storage. Note that, when sending of exemplars over remote write is enabled, exemplars count towards this limt.",
ConstLabels: constLabels, ConstLabels: constLabels,
}) })
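
The renamed bytes_total counter (previously samples_bytes_total) now covers all compressed payload bytes, exemplars included. A minimal sketch of asserting such a counter in a test via client_golang's testutil package; the CounterOpts here are simplified stand-ins for the const-labelled ones above:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Stand-in for the queue's renamed counter; the real one also carries
	// remote_name/url const labels.
	sentBytes := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "prometheus",
		Subsystem: "remote_storage",
		Name:      "bytes_total",
		Help:      "Total bytes of (non-metadata) data sent after compression.",
	})
	sentBytes.Add(2048)
	// testutil.ToFloat64 reads the current value of a single-metric collector.
	fmt.Println(testutil.ToFloat64(sentBytes)) // 2048
}
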
@ -226,22 +266,27 @@ func (m *queueManagerMetrics) register() {
if m.reg != nil { if m.reg != nil {
m.reg.MustRegister( m.reg.MustRegister(
m.samplesTotal, m.samplesTotal,
m.exemplarsTotal,
m.metadataTotal, m.metadataTotal,
m.failedSamplesTotal, m.failedSamplesTotal,
m.failedExemplarsTotal,
m.failedMetadataTotal, m.failedMetadataTotal,
m.retriedSamplesTotal, m.retriedSamplesTotal,
m.retriedExemplarsTotal,
m.retriedMetadataTotal, m.retriedMetadataTotal,
m.droppedSamplesTotal, m.droppedSamplesTotal,
m.droppedExemplarsTotal,
m.enqueueRetriesTotal, m.enqueueRetriesTotal,
m.sentBatchDuration, m.sentBatchDuration,
m.highestSentTimestamp, m.highestSentTimestamp,
m.pendingSamples, m.pendingSamples,
m.pendingExemplars,
m.shardCapacity, m.shardCapacity,
m.numShards, m.numShards,
m.maxNumShards, m.maxNumShards,
m.minNumShards, m.minNumShards,
m.desiredNumShards, m.desiredNumShards,
m.samplesBytesTotal, m.sentBytesTotal,
m.metadataBytesTotal, m.metadataBytesTotal,
m.maxSamplesPerSend, m.maxSamplesPerSend,
) )
@ -251,22 +296,27 @@ func (m *queueManagerMetrics) register() {
func (m *queueManagerMetrics) unregister() { func (m *queueManagerMetrics) unregister() {
if m.reg != nil { if m.reg != nil {
m.reg.Unregister(m.samplesTotal) m.reg.Unregister(m.samplesTotal)
m.reg.Unregister(m.exemplarsTotal)
m.reg.Unregister(m.metadataTotal) m.reg.Unregister(m.metadataTotal)
m.reg.Unregister(m.failedSamplesTotal) m.reg.Unregister(m.failedSamplesTotal)
m.reg.Unregister(m.failedExemplarsTotal)
m.reg.Unregister(m.failedMetadataTotal) m.reg.Unregister(m.failedMetadataTotal)
m.reg.Unregister(m.retriedSamplesTotal) m.reg.Unregister(m.retriedSamplesTotal)
m.reg.Unregister(m.retriedExemplarsTotal)
m.reg.Unregister(m.retriedMetadataTotal) m.reg.Unregister(m.retriedMetadataTotal)
m.reg.Unregister(m.droppedSamplesTotal) m.reg.Unregister(m.droppedSamplesTotal)
m.reg.Unregister(m.droppedExemplarsTotal)
m.reg.Unregister(m.enqueueRetriesTotal) m.reg.Unregister(m.enqueueRetriesTotal)
m.reg.Unregister(m.sentBatchDuration) m.reg.Unregister(m.sentBatchDuration)
m.reg.Unregister(m.highestSentTimestamp) m.reg.Unregister(m.highestSentTimestamp)
m.reg.Unregister(m.pendingSamples) m.reg.Unregister(m.pendingSamples)
m.reg.Unregister(m.pendingExemplars)
m.reg.Unregister(m.shardCapacity) m.reg.Unregister(m.shardCapacity)
m.reg.Unregister(m.numShards) m.reg.Unregister(m.numShards)
m.reg.Unregister(m.maxNumShards) m.reg.Unregister(m.maxNumShards)
m.reg.Unregister(m.minNumShards) m.reg.Unregister(m.minNumShards)
m.reg.Unregister(m.desiredNumShards) m.reg.Unregister(m.desiredNumShards)
m.reg.Unregister(m.samplesBytesTotal) m.reg.Unregister(m.sentBytesTotal)
m.reg.Unregister(m.metadataBytesTotal) m.reg.Unregister(m.metadataBytesTotal)
m.reg.Unregister(m.maxSamplesPerSend) m.reg.Unregister(m.maxSamplesPerSend)
} }
@ -295,6 +345,7 @@ type QueueManager struct {
mcfg config.MetadataConfig mcfg config.MetadataConfig
externalLabels labels.Labels externalLabels labels.Labels
relabelConfigs []*relabel.Config relabelConfigs []*relabel.Config
sendExemplars bool
watcher *wal.Watcher watcher *wal.Watcher
metadataWatcher *MetadataWatcher metadataWatcher *MetadataWatcher
@ -312,7 +363,7 @@ type QueueManager struct {
quit chan struct{} quit chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
samplesIn, samplesDropped, samplesOut, samplesOutDuration *ewmaRate dataIn, dataDropped, dataOut, dataOutDuration *ewmaRate
metrics *queueManagerMetrics metrics *queueManagerMetrics
interner *pool interner *pool
@ -336,6 +387,7 @@ func NewQueueManager(
interner *pool, interner *pool,
highestRecvTimestamp *maxTimestamp, highestRecvTimestamp *maxTimestamp,
sm ReadyScrapeManager, sm ReadyScrapeManager,
enableExemplarRemoteWrite bool,
) *QueueManager { ) *QueueManager {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
@ -350,6 +402,7 @@ func NewQueueManager(
externalLabels: externalLabels, externalLabels: externalLabels,
relabelConfigs: relabelConfigs, relabelConfigs: relabelConfigs,
storeClient: client, storeClient: client,
sendExemplars: enableExemplarRemoteWrite,
seriesLabels: make(map[uint64]labels.Labels), seriesLabels: make(map[uint64]labels.Labels),
seriesSegmentIndexes: make(map[uint64]int), seriesSegmentIndexes: make(map[uint64]int),
@ -359,17 +412,17 @@ func NewQueueManager(
reshardChan: make(chan int), reshardChan: make(chan int),
quit: make(chan struct{}), quit: make(chan struct{}),
samplesIn: samplesIn, dataIn: samplesIn,
samplesDropped: newEWMARate(ewmaWeight, shardUpdateDuration), dataDropped: newEWMARate(ewmaWeight, shardUpdateDuration),
samplesOut: newEWMARate(ewmaWeight, shardUpdateDuration), dataOut: newEWMARate(ewmaWeight, shardUpdateDuration),
samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration), dataOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
metrics: metrics, metrics: metrics,
interner: interner, interner: interner,
highestRecvTimestamp: highestRecvTimestamp, highestRecvTimestamp: highestRecvTimestamp,
} }
t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, walDir) t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, walDir, enableExemplarRemoteWrite)
if t.mcfg.Send { if t.mcfg.Send {
t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline) t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
} }
@ -444,13 +497,14 @@ func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []p
// Append queues a sample to be sent to the remote storage. Blocks until all samples are // Append queues a sample to be sent to the remote storage. Blocks until all samples are
// enqueued on their shards or a shutdown signal is received. // enqueued on their shards or a shutdown signal is received.
func (t *QueueManager) Append(samples []record.RefSample) bool { func (t *QueueManager) Append(samples []record.RefSample) bool {
var appendSample prompb.Sample
outer: outer:
for _, s := range samples { for _, s := range samples {
t.seriesMtx.Lock() t.seriesMtx.Lock()
lbls, ok := t.seriesLabels[s.Ref] lbls, ok := t.seriesLabels[s.Ref]
if !ok { if !ok {
t.metrics.droppedSamplesTotal.Inc() t.metrics.droppedSamplesTotal.Inc()
t.samplesDropped.incr(1) t.dataDropped.incr(1)
if _, ok := t.droppedSeries[s.Ref]; !ok { if _, ok := t.droppedSeries[s.Ref]; !ok {
level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref)
} }
@ -466,12 +520,56 @@ outer:
return false return false
default: default:
} }
appendSample.Value = s.V
appendSample.Timestamp = s.T
if t.shards.enqueue(s.Ref, writeSample{lbls, appendSample}) {
continue outer
}
if t.shards.enqueue(s.Ref, sample{ t.metrics.enqueueRetriesTotal.Inc()
labels: lbls, time.Sleep(time.Duration(backoff))
t: s.T, backoff = backoff * 2
v: s.V, if backoff > t.cfg.MaxBackoff {
}) { backoff = t.cfg.MaxBackoff
}
}
}
return true
}
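
The enqueue loop above retries with exponential doubling capped at cfg.MaxBackoff, bailing out only on shutdown. A self-contained sketch of that pattern, with hypothetical names (enqueueWithBackoff, try) standing in for shards.enqueue:

package main

import (
	"fmt"
	"time"
)

// enqueueWithBackoff doubles the sleep after every failed enqueue attempt,
// but never sleeps longer than maxBackoff, and gives up only on quit.
func enqueueWithBackoff(try func() bool, minBackoff, maxBackoff time.Duration, quit <-chan struct{}) bool {
	backoff := minBackoff
	for {
		select {
		case <-quit:
			return false
		default:
		}
		if try() {
			return true
		}
		time.Sleep(backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}

func main() {
	attempts := 0
	ok := enqueueWithBackoff(func() bool {
		attempts++
		return attempts >= 3 // succeed on the third try
	}, 5*time.Millisecond, 40*time.Millisecond, make(chan struct{}))
	fmt.Println(ok, attempts) // true 3
}
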
func (t *QueueManager) AppendExemplars(exemplars []record.RefExemplar) bool {
if !t.sendExemplars {
return true
}
var appendExemplar prompb.Exemplar
outer:
for _, e := range exemplars {
t.seriesMtx.Lock()
lbls, ok := t.seriesLabels[e.Ref]
if !ok {
t.metrics.droppedExemplarsTotal.Inc()
// Track dropped exemplars in the same EWMA for sharding calc.
t.dataDropped.incr(1)
if _, ok := t.droppedSeries[e.Ref]; !ok {
level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref)
}
t.seriesMtx.Unlock()
continue
}
t.seriesMtx.Unlock()
// This will only loop if the queues are being resharded.
backoff := t.cfg.MinBackoff
for {
select {
case <-t.quit:
return false
default:
}
appendExemplar.Labels = labelsToLabelsProto(e.Labels, nil)
appendExemplar.Timestamp = e.T
appendExemplar.Value = e.V
if t.shards.enqueue(e.Ref, writeExemplar{lbls, appendExemplar}) {
continue outer continue outer
} }
@ -687,27 +785,27 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool {
// outlined in this function's implementation. It is up to the caller to reshard, or not, // based on the return value.
// based on the return value. // based on the return value.
func (t *QueueManager) calculateDesiredShards() int { func (t *QueueManager) calculateDesiredShards() int {
t.samplesOut.tick() t.dataOut.tick()
t.samplesDropped.tick() t.dataDropped.tick()
t.samplesOutDuration.tick() t.dataOutDuration.tick()
// We use the number of incoming samples as a prediction of how much work we // We use the number of incoming samples as a prediction of how much work we
// will need to do next iteration. We add to this any pending samples // will need to do next iteration. We add to this any pending samples
// (received - send) so we can catch up with any backlog. We use the average // (received - send) so we can catch up with any backlog. We use the average
// outgoing batch latency to work out how many shards we need. // outgoing batch latency to work out how many shards we need.
var ( var (
samplesInRate = t.samplesIn.rate() dataInRate = t.dataIn.rate()
samplesOutRate = t.samplesOut.rate() dataOutRate = t.dataOut.rate()
samplesKeptRatio = samplesOutRate / (t.samplesDropped.rate() + samplesOutRate) dataKeptRatio = dataOutRate / (t.dataDropped.rate() + dataOutRate)
samplesOutDuration = t.samplesOutDuration.rate() / float64(time.Second) dataOutDuration = t.dataOutDuration.rate() / float64(time.Second)
samplesPendingRate = samplesInRate*samplesKeptRatio - samplesOutRate dataPendingRate = dataInRate*dataKeptRatio - dataOutRate
highestSent = t.metrics.highestSentTimestamp.Get() highestSent = t.metrics.highestSentTimestamp.Get()
highestRecv = t.highestRecvTimestamp.Get() highestRecv = t.highestRecvTimestamp.Get()
delay = highestRecv - highestSent delay = highestRecv - highestSent
samplesPending = delay * samplesInRate * samplesKeptRatio dataPending = delay * dataInRate * dataKeptRatio
) )
if samplesOutRate <= 0 { if dataOutRate <= 0 {
return t.numShards return t.numShards
} }
@ -717,17 +815,17 @@ func (t *QueueManager) calculateDesiredShards() int {
const integralGain = 0.1 / float64(shardUpdateDuration/time.Second) const integralGain = 0.1 / float64(shardUpdateDuration/time.Second)
var ( var (
timePerSample = samplesOutDuration / samplesOutRate timePerSample = dataOutDuration / dataOutRate
desiredShards = timePerSample * (samplesInRate*samplesKeptRatio + integralGain*samplesPending) desiredShards = timePerSample * (dataInRate*dataKeptRatio + integralGain*dataPending)
) )
t.metrics.desiredNumShards.Set(desiredShards) t.metrics.desiredNumShards.Set(desiredShards)
level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards",
"samplesInRate", samplesInRate, "dataInRate", dataInRate,
"samplesOutRate", samplesOutRate, "dataOutRate", dataOutRate,
"samplesKeptRatio", samplesKeptRatio, "dataKeptRatio", dataKeptRatio,
"samplesPendingRate", samplesPendingRate, "dataPendingRate", dataPendingRate,
"samplesPending", samplesPending, "dataPending", dataPending,
"samplesOutDuration", samplesOutDuration, "dataOutDuration", dataOutDuration,
"timePerSample", timePerSample, "timePerSample", timePerSample,
"desiredShards", desiredShards, "desiredShards", desiredShards,
"highestSent", highestSent, "highestSent", highestSent,
@ -785,17 +883,24 @@ func (t *QueueManager) newShards() *shards {
return s return s
} }
type sample struct { type writeSample struct {
labels labels.Labels seriesLabels labels.Labels
t int64 sample prompb.Sample
v float64 }
type writeExemplar struct {
seriesLabels labels.Labels
exemplar prompb.Exemplar
} }
type shards struct { type shards struct {
mtx sync.RWMutex // With the WAL, this is never actually contended. mtx sync.RWMutex // With the WAL, this is never actually contended.
qm *QueueManager qm *QueueManager
queues []chan sample queues []chan interface{}
// So we can accurately track how many of each are lost during shard shutdowns.
enqueuedSamples atomic.Int64
enqueuedExemplars atomic.Int64
// Emulate a wait group with a channel and an atomic int, as you // Emulate a wait group with a channel and an atomic int, as you
// cannot select on a wait group. // cannot select on a wait group.
@ -808,7 +913,8 @@ type shards struct {
// Hard shutdown context is used to terminate outgoing HTTP connections // Hard shutdown context is used to terminate outgoing HTTP connections
// after giving them a chance to terminate. // after giving them a chance to terminate.
hardShutdown context.CancelFunc hardShutdown context.CancelFunc
droppedOnHardShutdown atomic.Uint32 samplesDroppedOnHardShutdown atomic.Uint32
exemplarsDroppedOnHardShutdown atomic.Uint32
} }
// start the shards; must be called before any call to enqueue. // start the shards; must be called before any call to enqueue.
@ -819,9 +925,9 @@ func (s *shards) start(n int) {
s.qm.metrics.pendingSamples.Set(0) s.qm.metrics.pendingSamples.Set(0)
s.qm.metrics.numShards.Set(float64(n)) s.qm.metrics.numShards.Set(float64(n))
newQueues := make([]chan sample, n) newQueues := make([]chan interface{}, n)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
newQueues[i] = make(chan sample, s.qm.cfg.Capacity) newQueues[i] = make(chan interface{}, s.qm.cfg.Capacity)
} }
s.queues = newQueues s.queues = newQueues
@ -831,7 +937,8 @@ func (s *shards) start(n int) {
s.softShutdown = make(chan struct{}) s.softShutdown = make(chan struct{})
s.running.Store(int32(n)) s.running.Store(int32(n))
s.done = make(chan struct{}) s.done = make(chan struct{})
s.droppedOnHardShutdown.Store(0) s.samplesDroppedOnHardShutdown.Store(0)
s.exemplarsDroppedOnHardShutdown.Store(0)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
go s.runShard(hardShutdownCtx, i, newQueues[i]) go s.runShard(hardShutdownCtx, i, newQueues[i])
} }
@ -864,14 +971,17 @@ func (s *shards) stop() {
// Force an unclean shutdown. // Force an unclean shutdown.
s.hardShutdown() s.hardShutdown()
<-s.done <-s.done
if dropped := s.droppedOnHardShutdown.Load(); dropped > 0 { if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped) level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped)
} }
if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 {
level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped)
}
} }
// enqueue a sample. If we are currently in the process of shutting down or resharding, // enqueue data (sample or exemplar). If we are currently in the process of shutting down or resharding,
// will return false; in this case, you should back off and retry. // will return false; in this case, you should back off and retry.
func (s *shards) enqueue(ref uint64, sample sample) bool { func (s *shards) enqueue(ref uint64, data interface{}) bool {
s.mtx.RLock() s.mtx.RLock()
defer s.mtx.RUnlock() defer s.mtx.RUnlock()
@ -885,13 +995,22 @@ func (s *shards) enqueue(ref uint64, sample sample) bool {
select { select {
case <-s.softShutdown: case <-s.softShutdown:
return false return false
case s.queues[shard] <- sample: case s.queues[shard] <- data:
switch data.(type) {
case writeSample:
s.qm.metrics.pendingSamples.Inc() s.qm.metrics.pendingSamples.Inc()
s.enqueuedSamples.Inc()
case writeExemplar:
s.qm.metrics.pendingExemplars.Inc()
s.enqueuedExemplars.Inc()
default:
level.Warn(s.qm.logger).Log("msg", "Invalid object type in shards enqueue")
}
return true return true
} }
} }
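
Since the queues now carry interface{} values, enqueue dispatches on the concrete type to keep the per-kind pending gauges accurate. A stripped-down sketch of that dispatch, with simplified stand-in types:

package main

import "fmt"

type writeSample struct{ v float64 }
type writeExemplar struct{ traceID string }

// countPending mirrors the type switch in shards.enqueue: per-type counters
// are bumped depending on what actually went into the queue.
func countPending(items []interface{}) (samples, exemplars int) {
	for _, it := range items {
		switch it.(type) {
		case writeSample:
			samples++
		case writeExemplar:
			exemplars++
		default:
			// Unexpected payloads are logged in the real code.
		}
	}
	return samples, exemplars
}

func main() {
	s, e := countPending([]interface{}{writeSample{1}, writeExemplar{"abc"}, writeSample{2}})
	fmt.Println(s, e) // 2 1
}
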
func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) { func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface{}) {
defer func() { defer func() {
if s.running.Dec() == 0 { if s.running.Dec() == 0 {
close(s.done) close(s.done)
@ -901,14 +1020,26 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
shardNum := strconv.Itoa(shardID) shardNum := strconv.Itoa(shardID)
// Send batches of at most MaxSamplesPerSend samples to the remote storage. // Send batches of at most MaxSamplesPerSend samples to the remote storage.
// If we have fewer samples than that, flush them out after a deadline // If we have fewer samples than that, flush them out after a deadline anyways.
// anyways.
var ( var (
max = s.qm.cfg.MaxSamplesPerSend max = s.qm.cfg.MaxSamplesPerSend
nPending = 0 // Rough estimate, 1% of active series will contain an exemplar on each scrape.
pendingSamples = allocateTimeSeries(max) // TODO(cstyan): Casting this many times smells, also we could get index out of bounds issues here.
maxExemplars = int(math.Max(1, float64(max/10)))
nPending, nPendingSamples, nPendingExemplars = 0, 0, 0
sampleBuffer = allocateSampleBuffer(max)
buf []byte buf []byte
pendingData []prompb.TimeSeries
exemplarBuffer [][]prompb.Exemplar
) )
totalPending := max
if s.qm.sendExemplars {
exemplarBuffer = allocateExemplarBuffer(maxExemplars)
totalPending += maxExemplars
}
pendingData = make([]prompb.TimeSeries, totalPending)
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline)) timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() { stop := func() {
@ -926,18 +1057,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
case <-ctx.Done(): case <-ctx.Done():
// In this case we drop all samples in the buffer and the queue. // In this case we drop all samples in the buffer and the queue.
// Remove them from pending and mark them as failed. // Remove them from pending and mark them as failed.
droppedSamples := nPending + len(queue) droppedSamples := nPendingSamples + int(s.enqueuedSamples.Load())
droppedExemplars := nPendingExemplars + int(s.enqueuedExemplars.Load())
s.qm.metrics.pendingSamples.Sub(float64(droppedSamples)) s.qm.metrics.pendingSamples.Sub(float64(droppedSamples))
s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars))
s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples)) s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples))
s.droppedOnHardShutdown.Add(uint32(droppedSamples)) s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars))
s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples))
s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars))
return return
case sample, ok := <-queue: case sample, ok := <-queue:
if !ok { if !ok {
if nPending > 0 { if nPendingSamples > 0 || nPendingExemplars > 0 {
level.Debug(s.qm.logger).Log("msg", "Flushing samples to remote storage...", "count", nPending) level.Debug(s.qm.logger).Log("msg", "Flushing data to remote storage...", "samples", nPendingSamples, "exemplars", nPendingExemplars)
s.sendSamples(ctx, pendingSamples[:nPending], &buf) s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf)
s.qm.metrics.pendingSamples.Sub(float64(nPending)) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples))
s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars))
level.Debug(s.qm.logger).Log("msg", "Done flushing.") level.Debug(s.qm.logger).Log("msg", "Done flushing.")
} }
return return
@ -946,25 +1082,44 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
// stop reading from the queue. This makes it safe to reference pendingSamples by index. // stop reading from the queue. This makes it safe to reference pendingSamples by index.
pendingSamples[nPending].Labels = labelsToLabelsProto(sample.labels, pendingSamples[nPending].Labels) switch d := sample.(type) {
pendingSamples[nPending].Samples[0].Timestamp = sample.t case writeSample:
pendingSamples[nPending].Samples[0].Value = sample.v sampleBuffer[nPendingSamples][0] = d.sample
pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
pendingData[nPending].Samples = sampleBuffer[nPendingSamples]
pendingData[nPending].Exemplars = nil
nPendingSamples++
nPending++ nPending++
if nPending >= max { case writeExemplar:
s.sendSamples(ctx, pendingSamples, &buf) exemplarBuffer[nPendingExemplars][0] = d.exemplar
pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
pendingData[nPending].Samples = nil
pendingData[nPending].Exemplars = exemplarBuffer[nPendingExemplars]
nPendingExemplars++
nPending++
}
if nPendingSamples >= max || nPendingExemplars >= maxExemplars {
s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf)
s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples))
s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars))
nPendingSamples = 0
nPendingExemplars = 0
nPending = 0 nPending = 0
s.qm.metrics.pendingSamples.Sub(float64(max))
stop() stop()
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
} }
case <-timer.C: case <-timer.C:
if nPending > 0 { if nPendingSamples > 0 || nPendingExemplars > 0 {
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending samples", "samples", nPending, "shard", shardNum) level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum)
s.sendSamples(ctx, pendingSamples[:nPending], &buf) s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, &buf)
s.qm.metrics.pendingSamples.Sub(float64(nPending)) s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples))
s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars))
nPendingSamples = 0
nPendingExemplars = 0
nPending = 0 nPending = 0
} }
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@ -972,23 +1127,24 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue chan sample) {
} }
} }
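
runShard's control flow is the classic batch-or-deadline loop: accumulate until the batch fills or the timer fires, then flush and reset. A self-contained sketch with int payloads standing in for samples and exemplars:

package main

import (
	"fmt"
	"time"
)

// flushLoop accumulates items until the batch is full or the deadline timer
// fires, then flushes; a closed queue triggers a final flush, like the
// shutdown path above.
func flushLoop(queue <-chan int, max int, deadline time.Duration, flush func([]int)) {
	pending := make([]int, 0, max)
	timer := time.NewTimer(deadline)
	defer timer.Stop()
	for {
		select {
		case v, ok := <-queue:
			if !ok {
				if len(pending) > 0 {
					flush(pending)
				}
				return
			}
			pending = append(pending, v)
			if len(pending) >= max {
				flush(pending)
				pending = pending[:0]
				if !timer.Stop() { // drain a fired-but-unread timer
					select {
					case <-timer.C:
					default:
					}
				}
				timer.Reset(deadline)
			}
		case <-timer.C:
			if len(pending) > 0 {
				flush(pending)
				pending = pending[:0]
			}
			timer.Reset(deadline)
		}
	}
}

func main() {
	q := make(chan int)
	go func() {
		for i := 0; i < 5; i++ {
			q <- i
		}
		close(q)
	}()
	flushLoop(q, 2, 50*time.Millisecond, func(b []int) { fmt.Println("flush", b) })
}
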
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) { func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) {
begin := time.Now() begin := time.Now()
err := s.sendSamplesWithBackoff(ctx, samples, buf) err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, buf)
if err != nil { if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", len(samples), "err", err) level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err)
s.qm.metrics.failedSamplesTotal.Add(float64(len(samples))) s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount))
} }
// These counters are used to calculate the dynamic sharding, and as such // These counters are used to calculate the dynamic sharding, and as such
// should be maintained irrespective of success or failure. // should be maintained irrespective of success or failure.
s.qm.samplesOut.incr(int64(len(samples))) s.qm.dataOut.incr(int64(len(samples)))
s.qm.samplesOutDuration.incr(int64(time.Since(begin))) s.qm.dataOutDuration.incr(int64(time.Since(begin)))
s.qm.lastSendTimestamp.Store(time.Now().Unix()) s.qm.lastSendTimestamp.Store(time.Now().Unix())
} }
// sendSamples to the remote storage with backoff for recoverable errors. // sendSamples to the remote storage with backoff for recoverable errors.
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, buf *[]byte) error { func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount int, exemplarCount int, buf *[]byte) error {
// Build the WriteRequest with no metadata. // Build the WriteRequest with no metadata.
req, highest, err := buildWriteRequest(samples, nil, *buf) req, highest, err := buildWriteRequest(samples, nil, *buf)
if err != nil { if err != nil {
@ -998,7 +1154,6 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
} }
reqSize := len(*buf) reqSize := len(*buf)
sampleCount := len(samples)
*buf = req *buf = req
// An anonymous function allows us to defer the completion of our per-try spans // An anonymous function allows us to defer the completion of our per-try spans
@ -1009,6 +1164,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
defer span.Finish() defer span.Finish()
span.SetTag("samples", sampleCount) span.SetTag("samples", sampleCount)
if exemplarCount > 0 {
span.SetTag("exemplars", exemplarCount)
}
span.SetTag("request_size", reqSize) span.SetTag("request_size", reqSize)
span.SetTag("try", try) span.SetTag("try", try)
span.SetTag("remote_name", s.qm.storeClient.Name()) span.SetTag("remote_name", s.qm.storeClient.Name())
@ -1016,6 +1174,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
begin := time.Now() begin := time.Now()
s.qm.metrics.samplesTotal.Add(float64(sampleCount)) s.qm.metrics.samplesTotal.Add(float64(sampleCount))
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
err := s.qm.client().Store(ctx, *buf) err := s.qm.client().Store(ctx, *buf)
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
@ -1030,13 +1189,14 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
onRetry := func() { onRetry := func() {
s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount)) s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
} }
err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry) err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry)
if err != nil { if err != nil {
return err return err
} }
s.qm.metrics.samplesBytesTotal.Add(float64(reqSize)) s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
return nil return nil
} }
@ -1096,10 +1256,13 @@ func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l
func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, buf []byte) ([]byte, int64, error) { func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, buf []byte) ([]byte, int64, error) {
var highest int64 var highest int64
for _, ts := range samples { for _, ts := range samples {
// At the moment we only ever append a TimeSeries with a single sample in it. // At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
if ts.Samples[0].Timestamp > highest { if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
highest = ts.Samples[0].Timestamp highest = ts.Samples[0].Timestamp
} }
if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
highest = ts.Exemplars[0].Timestamp
}
} }
req := &prompb.WriteRequest{ req := &prompb.WriteRequest{
@ -1121,11 +1284,18 @@ func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMeta
return compressed, highest, nil return compressed, highest, nil
} }
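
buildWriteRequest marshals the WriteRequest with gogo protobuf and snappy-compresses it, reusing the caller's buffer. A minimal sketch of that encode path, assuming the prompb types and snappy package as vendored at this revision:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// One series with a single sample and a single exemplar, matching the
	// "one sample or exemplar per TimeSeries" shape used by the queue.
	req := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels:  []prompb.Label{{Name: "__name__", Value: "test_metric"}},
			Samples: []prompb.Sample{{Timestamp: 1000, Value: 1}},
			Exemplars: []prompb.Exemplar{{
				Labels:    []prompb.Label{{Name: "traceID", Value: "abc"}},
				Timestamp: 1000,
				Value:     1,
			}},
		}},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	// snappy.Encode reuses the destination buffer when it is large enough,
	// which is why the queue threads *buf through buildWriteRequest.
	compressed := snappy.Encode(nil, data)
	fmt.Println(len(data), "->", len(compressed), "bytes")
}
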
func allocateTimeSeries(capacity int) []prompb.TimeSeries { func allocateSampleBuffer(capacity int) [][]prompb.Sample {
timeseries := make([]prompb.TimeSeries, capacity) buf := make([][]prompb.Sample, capacity)
// We only ever send one sample per timeseries, so preallocate with length one. for i := range buf {
for i := range timeseries { buf[i] = []prompb.Sample{{}}
timeseries[i].Samples = []prompb.Sample{{}}
} }
return timeseries return buf
}
func allocateExemplarBuffer(capacity int) [][]prompb.Exemplar {
buf := make([][]prompb.Exemplar, capacity)
for i := range buf {
buf[i] = []prompb.Exemplar{{}}
}
return buf
} }
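
allocateSampleBuffer and allocateExemplarBuffer preallocate length-one slices so each send overwrites element [0] rather than allocating per series. A tiny illustration of that reuse pattern (the sample type here is a hypothetical stand-in):

package main

import "fmt"

type sample struct {
	t int64
	v float64
}

func main() {
	// Allocate the length-one slices once...
	buf := make([][]sample, 3)
	for i := range buf {
		buf[i] = []sample{{}}
	}
	// ...then overwrite element [0] on every batch instead of allocating a
	// fresh []sample per series per send.
	for batch := 0; batch < 2; batch++ {
		for i := range buf {
			buf[i][0] = sample{t: int64(batch), v: float64(i)}
		}
		fmt.Println(buf)
	}
}
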


@ -60,21 +60,22 @@ func newHighestTimestampMetric() *maxTimestamp {
} }
func TestSampleDelivery(t *testing.T) { func TestSampleDelivery(t *testing.T) {
testcases := []struct {
name string
samples bool
exemplars bool
}{
{samples: true, exemplars: false, name: "samples only"},
{samples: true, exemplars: true, name: "both samples and exemplars"},
{samples: false, exemplars: true, name: "exemplars only"},
}
// Let's create an even number of send batches so we don't run into the // Let's create an even number of send batches so we don't run into the
// batch timeout case. // batch timeout case.
n := config.DefaultQueueConfig.MaxSamplesPerSend * 2 n := 3
samples, series := createTimeseries(n, n)
c := NewTestWriteClient() dir, err := ioutil.TempDir("", "TestSampleDelivery")
c.expectSamples(samples[:len(samples)/2], series)
queueConfig := config.DefaultQueueConfig
queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
queueConfig.MaxShards = 1
queueConfig.Capacity = len(samples)
queueConfig.MaxSamplesPerSend = len(samples) / 2
dir, err := ioutil.TempDir("", "TestSampleDeliver")
require.NoError(t, err) require.NoError(t, err)
defer func() { defer func() {
require.NoError(t, os.RemoveAll(dir)) require.NoError(t, os.RemoveAll(dir))
@ -83,13 +84,11 @@ func TestSampleDelivery(t *testing.T) {
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil) s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
defer s.Close() defer s.Close()
queueConfig := config.DefaultQueueConfig
queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
queueConfig.MaxShards = 1
writeConfig := config.DefaultRemoteWriteConfig writeConfig := config.DefaultRemoteWriteConfig
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
&writeConfig,
},
}
// We need to set URL's so that metric creation doesn't panic. // We need to set URL's so that metric creation doesn't panic.
writeConfig.URL = &common_config.URL{ writeConfig.URL = &common_config.URL{
URL: &url.URL{ URL: &url.URL{
@ -97,19 +96,60 @@ func TestSampleDelivery(t *testing.T) {
}, },
} }
writeConfig.QueueConfig = queueConfig writeConfig.QueueConfig = queueConfig
writeConfig.SendExemplars = true
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
&writeConfig,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
var (
series []record.RefSeries
samples []record.RefSample
exemplars []record.RefExemplar
)
// Generate the same series in both cases.
if tc.samples {
samples, series = createTimeseries(n, n)
}
if tc.exemplars {
exemplars, series = createExemplars(n, n)
}
// Apply new config.
queueConfig.Capacity = len(samples)
queueConfig.MaxSamplesPerSend = len(samples) / 2
require.NoError(t, s.ApplyConfig(conf)) require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig) hash, err := toHash(writeConfig)
require.NoError(t, err) require.NoError(t, err)
qm := s.rws.queues[hash] qm := s.rws.queues[hash]
c := NewTestWriteClient()
qm.SetClient(c) qm.SetClient(c)
qm.StoreSeries(series, 0) qm.StoreSeries(series, 0)
// Send first half of data.
c.expectSamples(samples[:len(samples)/2], series)
c.expectExemplars(exemplars[:len(exemplars)/2], series)
qm.Append(samples[:len(samples)/2]) qm.Append(samples[:len(samples)/2])
c.waitForExpectedSamples(t) qm.AppendExemplars(exemplars[:len(exemplars)/2])
c.waitForExpectedData(t)
// Send second half of data.
c.expectSamples(samples[len(samples)/2:], series) c.expectSamples(samples[len(samples)/2:], series)
c.expectExemplars(exemplars[len(exemplars)/2:], series)
qm.Append(samples[len(samples)/2:]) qm.Append(samples[len(samples)/2:])
c.waitForExpectedSamples(t) qm.AppendExemplars(exemplars[len(exemplars)/2:])
c.waitForExpectedData(t)
})
}
} }
func TestMetadataDelivery(t *testing.T) { func TestMetadataDelivery(t *testing.T) {
@ -123,7 +163,7 @@ func TestMetadataDelivery(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -157,7 +197,7 @@ func TestSampleDeliveryTimeout(t *testing.T) {
}() }()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
@ -165,11 +205,11 @@ func TestSampleDeliveryTimeout(t *testing.T) {
// Send the samples twice, waiting for the samples in the meantime. // Send the samples twice, waiting for the samples in the meantime.
c.expectSamples(samples, series) c.expectSamples(samples, series)
m.Append(samples) m.Append(samples)
c.waitForExpectedSamples(t) c.waitForExpectedData(t)
c.expectSamples(samples, series) c.expectSamples(samples, series)
m.Append(samples) m.Append(samples)
c.waitForExpectedSamples(t) c.waitForExpectedData(t)
} }
func TestSampleDeliveryOrder(t *testing.T) { func TestSampleDeliveryOrder(t *testing.T) {
@ -203,14 +243,14 @@ func TestSampleDeliveryOrder(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
defer m.Stop() defer m.Stop()
// These should be received by the client. // These should be received by the client.
m.Append(samples) m.Append(samples)
c.waitForExpectedSamples(t) c.waitForExpectedData(t)
} }
func TestShutdown(t *testing.T) { func TestShutdown(t *testing.T) {
@ -227,7 +267,7 @@ func TestShutdown(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n) samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
@ -269,7 +309,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false)
for i := 0; i < numSegments; i++ { for i := 0; i < numSegments; i++ {
series := []record.RefSeries{} series := []record.RefSeries{}
for j := 0; j < numSeries; j++ { for j := 0; j < numSeries; j++ {
@ -302,7 +342,7 @@ func TestReshard(t *testing.T) {
}() }()
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
m.Start() m.Start()
@ -322,7 +362,7 @@ func TestReshard(t *testing.T) {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
} }
c.waitForExpectedSamples(t) c.waitForExpectedData(t)
} }
func TestReshardRaceWithStop(t *testing.T) { func TestReshardRaceWithStop(t *testing.T) {
@ -337,7 +377,7 @@ func TestReshardRaceWithStop(t *testing.T) {
go func() { go func() {
for { for {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m = NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.Start() m.Start()
h.Unlock() h.Unlock()
h.Lock() h.Lock()
@ -357,7 +397,7 @@ func TestReleaseNoninternedString(t *testing.T) {
mcfg := config.DefaultMetadataConfig mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestWriteClient() c := NewTestWriteClient()
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.Start() m.Start()
for i := 1; i < 1000; i++ { for i := 1; i < 1000; i++ {
@ -408,10 +448,10 @@ func TestShouldReshard(t *testing.T) {
for _, c := range cases { for _, c := range cases {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
client := NewTestWriteClient() client := NewTestWriteClient()
m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, client, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.numShards = c.startingShards m.numShards = c.startingShards
m.samplesIn.incr(c.samplesIn) m.dataIn.incr(c.samplesIn)
m.samplesOut.incr(c.samplesOut) m.dataOut.incr(c.samplesOut)
m.lastSendTimestamp.Store(c.lastSendTimestamp) m.lastSendTimestamp.Store(c.lastSendTimestamp)
m.Start() m.Start()
@ -436,7 +476,6 @@ func createTimeseries(numSamples, numSeries int) ([]record.RefSample, []record.R
T: int64(j), T: int64(j),
V: float64(i), V: float64(i),
}) })
} }
series = append(series, record.RefSeries{ series = append(series, record.RefSeries{
Ref: uint64(i), Ref: uint64(i),
@ -446,6 +485,28 @@ func createTimeseries(numSamples, numSeries int) ([]record.RefSample, []record.R
return samples, series return samples, series
} }
func createExemplars(numExemplars, numSeries int) ([]record.RefExemplar, []record.RefSeries) {
exemplars := make([]record.RefExemplar, 0, numExemplars)
series := make([]record.RefSeries, 0, numSeries)
for i := 0; i < numSeries; i++ {
name := fmt.Sprintf("test_metric_%d", i)
for j := 0; j < numExemplars; j++ {
e := record.RefExemplar{
Ref: uint64(i),
T: int64(j),
V: float64(i),
Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)),
}
exemplars = append(exemplars, e)
}
series = append(series, record.RefSeries{
Ref: uint64(i),
Labels: labels.Labels{{Name: "__name__", Value: name}},
})
}
return exemplars, series
}
func getSeriesNameFromRef(r record.RefSeries) string { func getSeriesNameFromRef(r record.RefSeries) string {
for _, l := range r.Labels { for _, l := range r.Labels {
if l.Name == "__name__" { if l.Name == "__name__" {
@ -458,6 +519,8 @@ func getSeriesNameFromRef(r record.RefSeries) string {
type TestWriteClient struct { type TestWriteClient struct {
receivedSamples map[string][]prompb.Sample receivedSamples map[string][]prompb.Sample
expectedSamples map[string][]prompb.Sample expectedSamples map[string][]prompb.Sample
receivedExemplars map[string][]prompb.Exemplar
expectedExemplars map[string][]prompb.Exemplar
receivedMetadata map[string][]prompb.MetricMetadata receivedMetadata map[string][]prompb.MetricMetadata
withWaitGroup bool withWaitGroup bool
wg sync.WaitGroup wg sync.WaitGroup
@ -494,7 +557,29 @@ func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.R
c.wg.Add(len(ss)) c.wg.Add(len(ss))
} }
func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) { func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []record.RefSeries) {
if !c.withWaitGroup {
return
}
c.mtx.Lock()
defer c.mtx.Unlock()
c.expectedExemplars = map[string][]prompb.Exemplar{}
c.receivedExemplars = map[string][]prompb.Exemplar{}
for _, s := range ss {
seriesName := getSeriesNameFromRef(series[s.Ref])
e := prompb.Exemplar{
Labels: labelsToLabelsProto(s.Labels, nil),
Timestamp: s.T,
Value: s.V,
}
c.expectedExemplars[seriesName] = append(c.expectedExemplars[seriesName], e)
}
c.wg.Add(len(ss))
}
func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
if !c.withWaitGroup { if !c.withWaitGroup {
return return
} }
@ -504,9 +589,12 @@ func (c *TestWriteClient) waitForExpectedSamples(tb testing.TB) {
for ts, expectedSamples := range c.expectedSamples { for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts) require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
} }
for ts, expectedExemplar := range c.expectedExemplars {
require.Equal(tb, expectedExemplar, c.receivedExemplars[ts], ts)
}
} }
func (c *TestWriteClient) expectSampleCount(numSamples int) { func (c *TestWriteClient) expectDataCount(numSamples int) {
if !c.withWaitGroup { if !c.withWaitGroup {
return return
} }
@ -515,7 +603,7 @@ func (c *TestWriteClient) expectSampleCount(numSamples int) {
c.wg.Add(numSamples) c.wg.Add(numSamples)
} }
func (c *TestWriteClient) waitForExpectedSampleCount() { func (c *TestWriteClient) waitForExpectedDataCount() {
if !c.withWaitGroup { if !c.withWaitGroup {
return return
} }
@ -553,6 +641,11 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte) error {
count++ count++
c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample) c.receivedSamples[seriesName] = append(c.receivedSamples[seriesName], sample)
} }
for _, ex := range ts.Exemplars {
count++
c.receivedExemplars[seriesName] = append(c.receivedExemplars[seriesName], ex)
}
} }
if c.withWaitGroup { if c.withWaitGroup {
c.wg.Add(-count) c.wg.Add(-count)
@ -621,7 +714,7 @@ func BenchmarkSampleDelivery(b *testing.B) {
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
m.StoreSeries(series, 0) m.StoreSeries(series, 0)
// These should be received by the client. // These should be received by the client.
@ -630,9 +723,9 @@ func BenchmarkSampleDelivery(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
c.expectSampleCount(len(samples)) c.expectDataCount(len(samples))
m.Append(samples) m.Append(samples)
c.waitForExpectedSampleCount() c.waitForExpectedDataCount()
} }
// Do not include shutdown // Do not include shutdown
b.StopTimer() b.StopTimer()
@ -667,7 +760,7 @@ func BenchmarkStartup(b *testing.B) {
c := NewTestBlockedWriteClient() c := NewTestBlockedWriteClient()
m := NewQueueManager(metrics, nil, nil, logger, dir, m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration), newEWMARate(ewmaWeight, shardUpdateDuration),
cfg, mcfg, nil, nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil) cfg, mcfg, nil, nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64)) m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2] m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run() err := m.watcher.Run()
@ -719,7 +812,7 @@ func TestCalculateDesiredShards(t *testing.T) {
metrics := newQueueManagerMetrics(nil, "", "") metrics := newQueueManagerMetrics(nil, "", "")
samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration) samplesIn := newEWMARate(ewmaWeight, shardUpdateDuration)
m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil) m := NewQueueManager(metrics, nil, nil, nil, dir, samplesIn, cfg, mcfg, nil, nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false)
// Need to start the queue manager so the proper metrics are initialized. // Need to start the queue manager so the proper metrics are initialized.
// However we can stop it right away since we don't need to do any actual // However we can stop it right away since we don't need to do any actual
@ -745,8 +838,8 @@ func TestCalculateDesiredShards(t *testing.T) {
// helper function for sending samples. // helper function for sending samples.
sendSamples := func(s int64, ts time.Duration) { sendSamples := func(s int64, ts time.Duration) {
pendingSamples -= s pendingSamples -= s
m.samplesOut.incr(s) m.dataOut.incr(s)
m.samplesOutDuration.incr(int64(m.numShards) * int64(shardUpdateDuration)) m.dataOutDuration.incr(int64(m.numShards) * int64(shardUpdateDuration))
// highest sent is how far back pending samples would be at our input rate. // highest sent is how far back pending samples would be at our input rate.
highestSent := startedAt.Add(ts - time.Duration(pendingSamples/inputRate)*time.Second) highestSent := startedAt.Add(ts - time.Duration(pendingSamples/inputRate)*time.Second)


@ -37,6 +37,12 @@ var (
Name: "samples_in_total", Name: "samples_in_total",
Help: "Samples in to remote storage, compare to samples out for queue managers.", Help: "Samples in to remote storage, compare to samples out for queue managers.",
}) })
exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "exemplars_in_total",
Help: "Exemplars in to remote storage, compare to exemplars out for queue managers.",
})
) )
// WriteStorage represents all the remote write storage. // WriteStorage represents all the remote write storage.
@ -169,6 +175,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.interner, rws.interner,
rws.highestTimestamp, rws.highestTimestamp,
rws.scraper, rws.scraper,
rwConf.SendExemplars,
) )
// Keep track of which queues are new so we know which to start. // Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash) newHashes = append(newHashes, hash)
@ -210,6 +217,7 @@ func (rws *WriteStorage) Close() error {
type timestampTracker struct { type timestampTracker struct {
writeStorage *WriteStorage writeStorage *WriteStorage
samples int64 samples int64
exemplars int64
highestTimestamp int64 highestTimestamp int64
highestRecvTimestamp *maxTimestamp highestRecvTimestamp *maxTimestamp
} }
@ -224,14 +232,16 @@ func (t *timestampTracker) Append(_ uint64, _ labels.Labels, ts int64, _ float64
} }
func (t *timestampTracker) AppendExemplar(_ uint64, _ labels.Labels, _ exemplar.Exemplar) (uint64, error) { func (t *timestampTracker) AppendExemplar(_ uint64, _ labels.Labels, _ exemplar.Exemplar) (uint64, error) {
t.exemplars++
return 0, nil return 0, nil
} }
// Commit implements storage.Appender. // Commit implements storage.Appender.
func (t *timestampTracker) Commit() error { func (t *timestampTracker) Commit() error {
t.writeStorage.samplesIn.incr(t.samples) t.writeStorage.samplesIn.incr(t.samples + t.exemplars)
samplesIn.Add(float64(t.samples)) samplesIn.Add(float64(t.samples))
exemplarsIn.Add(float64(t.exemplars))
t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000)) t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000))
return nil return nil
} }


@ -204,9 +204,7 @@ func (c *seriesSetToChunkSet) Next() bool {
} }
func (c *seriesSetToChunkSet) At() ChunkSeries { func (c *seriesSetToChunkSet) At() ChunkSeries {
return &seriesToChunkEncoder{ return NewSeriesToChunkEncoder(c.SeriesSet.At())
Series: c.SeriesSet.At(),
}
} }
func (c *seriesSetToChunkSet) Err() error { func (c *seriesSetToChunkSet) Err() error {
@ -217,7 +215,13 @@ type seriesToChunkEncoder struct {
Series Series
} }
// TODO(bwplotka): Currently encoder will just naively build one chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670 const seriesToChunkEncoderSplit = 120
// NewSeriesToChunkEncoder encodes samples to chunks with 120 samples limit.
func NewSeriesToChunkEncoder(series Series) ChunkSeries {
return &seriesToChunkEncoder{series}
}
func (s *seriesToChunkEncoder) Iterator() chunks.Iterator { func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
chk := chunkenc.NewXORChunk() chk := chunkenc.NewXORChunk()
app, err := chk.Appender() app, err := chk.Appender()
@ -227,8 +231,28 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
mint := int64(math.MaxInt64) mint := int64(math.MaxInt64)
maxt := int64(math.MinInt64) maxt := int64(math.MinInt64)
chks := []chunks.Meta{}
i := 0
seriesIter := s.Series.Iterator() seriesIter := s.Series.Iterator()
for seriesIter.Next() { for seriesIter.Next() {
// Create a new chunk if too many samples in the current one.
if i >= seriesToChunkEncoderSplit {
chks = append(chks, chunks.Meta{
MinTime: mint,
MaxTime: maxt,
Chunk: chk,
})
chk = chunkenc.NewXORChunk()
app, err = chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
mint = int64(math.MaxInt64)
// maxt is immediately overwritten below which is why setting it here won't make a difference.
i = 0
}
t, v := seriesIter.At() t, v := seriesIter.At()
app.Append(t, v) app.Append(t, v)
@ -236,16 +260,19 @@ func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
if mint == math.MaxInt64 { if mint == math.MaxInt64 {
mint = t mint = t
} }
i++
} }
if err := seriesIter.Err(); err != nil { if err := seriesIter.Err(); err != nil {
return errChunksIterator{err: err} return errChunksIterator{err: err}
} }
return NewListChunkSeriesIterator(chunks.Meta{ chks = append(chks, chunks.Meta{
MinTime: mint, MinTime: mint,
MaxTime: maxt, MaxTime: maxt,
Chunk: chk, Chunk: chk,
}) })
return NewListChunkSeriesIterator(chks...)
} }
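
The encoder now cuts a chunk every seriesToChunkEncoderSplit (120) samples instead of naively building one unbounded chunk. The split logic in isolation, using chunkenc directly:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	const split = 120 // same cutoff as seriesToChunkEncoderSplit
	var out []chunkenc.Chunk

	chk := chunkenc.NewXORChunk()
	app, err := chk.Appender()
	if err != nil {
		panic(err)
	}
	for i := 0; i < 300; i++ {
		// Start a fresh chunk once the current one holds `split` samples.
		if i > 0 && i%split == 0 {
			out = append(out, chk)
			chk = chunkenc.NewXORChunk()
			if app, err = chk.Appender(); err != nil {
				panic(err)
			}
		}
		app.Append(int64(i), float64(i))
	}
	out = append(out, chk)

	for i, c := range out {
		fmt.Printf("chunk %d: %d samples\n", i, c.NumSamples())
	}
	// chunk 0: 120 samples, chunk 1: 120 samples, chunk 2: 60 samples
}
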
type errChunksIterator struct { type errChunksIterator struct {


@ -323,7 +323,7 @@ func TestBlockSize(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size") require.Equal(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil) c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil) blockDirAfterCompact, err := c.Compact(tmpdir, []string{blockInit.Dir()}, nil)
require.NoError(t, err) require.NoError(t, err)
@ -426,7 +426,7 @@ func createBlock(tb testing.TB, dir string, series []storage.Series) string {
} }
func createBlockFromHead(tb testing.TB, dir string, head *Head) string { func createBlockFromHead(tb testing.TB, dir string, head *Head) string {
compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil) compactor, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, nil)
require.NoError(tb, err) require.NoError(tb, err)
require.NoError(tb, os.MkdirAll(dir, 0777)) require.NoError(tb, os.MkdirAll(dir, 0777))


@ -100,7 +100,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) {
nil, nil,
w.logger, w.logger,
[]int64{w.blockSize}, []int64{w.blockSize},
chunkenc.NewPool()) chunkenc.NewPool(), nil)
if err != nil { if err != nil {
return ulid.ULID{}, errors.Wrap(err, "create leveled compactor") return ulid.ULID{}, errors.Wrap(err, "create leveled compactor")
} }


@ -556,7 +556,14 @@ func (cdm *ChunkDiskMapper) Chunk(ref uint64) (chunkenc.Chunk, error) {
// The chunk data itself. // The chunk data itself.
chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd) chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd)
chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkData)
// Make a copy of the chunk data to prevent a panic occurring because the returned
// chunk data slice references an mmap-ed file which could be closed after the
// function returns but while the chunk is still in use.
chkDataCopy := make([]byte, len(chkData))
copy(chkDataCopy, chkData)
chk, err := cdm.pool.Get(chunkenc.Encoding(chkEnc), chkDataCopy)
if err != nil { if err != nil {
return nil, &CorruptionErr{ return nil, &CorruptionErr{
Dir: cdm.dir.Name(), Dir: cdm.dir.Name(),
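
The copy introduced here decouples the returned chunk from the mmap-ed file's lifetime: the Range result is a view into the mapping, so reading it after munmap would fault. A simulation of the aliasing hazard, with an ordinary slice standing in for the mapping:

package main

import "fmt"

func main() {
	// backing simulates a byte slice that aliases an mmap-ed file: once the
	// file is munmap-ed, reads through the old slice would crash the process.
	backing := []byte{0x01, 0x02, 0x03, 0x04}
	view := backing[1:3] // what byteSlice.Range returns: a view, not a copy

	// Decouple the chunk data from the mapping's lifetime before handing it out.
	safe := make([]byte, len(view))
	copy(safe, view)

	backing[1] = 0xFF // stands in for the mapping going away or being reused
	fmt.Println(view, safe) // [255 3] [2 3]
}
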


@ -82,6 +82,7 @@ type LeveledCompactor struct {
chunkPool chunkenc.Pool chunkPool chunkenc.Pool
ctx context.Context ctx context.Context
maxBlockChunkSegmentSize int64 maxBlockChunkSegmentSize int64
mergeFunc storage.VerticalChunkSeriesMergeFunc
} }
type compactorMetrics struct { type compactorMetrics struct {
@ -145,11 +146,11 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics {
} }
// NewLeveledCompactor returns a LeveledCompactor. // NewLeveledCompactor returns a LeveledCompactor.
func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) { func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
return NewLeveledCompactorWithChunkSize(ctx, r, l, ranges, pool, chunks.DefaultChunkSegmentSize) return NewLeveledCompactorWithChunkSize(ctx, r, l, ranges, pool, chunks.DefaultChunkSegmentSize, mergeFunc)
} }
func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64) (*LeveledCompactor, error) { func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) {
if len(ranges) == 0 { if len(ranges) == 0 {
return nil, errors.Errorf("at least one range must be provided") return nil, errors.Errorf("at least one range must be provided")
} }
@ -159,6 +160,9 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register
if l == nil { if l == nil {
l = log.NewNopLogger() l = log.NewNopLogger()
} }
if mergeFunc == nil {
mergeFunc = storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)
}
return &LeveledCompactor{ return &LeveledCompactor{
ranges: ranges, ranges: ranges,
chunkPool: pool, chunkPool: pool,
@ -166,6 +170,7 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register
metrics: newCompactorMetrics(r), metrics: newCompactorMetrics(r),
ctx: ctx, ctx: ctx,
maxBlockChunkSegmentSize: maxBlockChunkSegmentSize, maxBlockChunkSegmentSize: maxBlockChunkSegmentSize,
mergeFunc: mergeFunc,
}, nil }, nil
} }
@ -746,8 +751,9 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
set := sets[0] set := sets[0]
if len(sets) > 1 { if len(sets) > 1 {
// Merge series using compacting chunk series merger. // Merge series using the specified chunk series merger.
set = storage.NewMergeChunkSeriesSet(sets, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)) // The default one is the compacting series merger.
set = storage.NewMergeChunkSeriesSet(sets, c.mergeFunc)
} }
// Iterate over all sorted chunk series. // Iterate over all sorted chunk series.
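As a usage sketch (assuming this version's import paths), here is how a caller could construct a compactor with the merge function made explicit; passing `nil` selects the same compacting merger, per the constructor above.
```
package main

import (
	"context"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Spell out the default merge behaviour; passing nil instead of
	// mergeFunc yields the same compacting merger.
	mergeFunc := storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)
	_, err := tsdb.NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(),
		[]int64{2 * 60 * 60 * 1000}, // a single 2h range, for illustration
		chunkenc.NewPool(), mergeFunc)
	if err != nil {
		panic(err)
	}
}
```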

View File

@ -158,7 +158,7 @@ func TestNoPanicFor0Tombstones(t *testing.T) {
}, },
} }
c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil) c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{50}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
c.plan(metas) c.plan(metas)
@ -172,7 +172,7 @@ func TestLeveledCompactor_plan(t *testing.T) {
180, 180,
540, 540,
1620, 1620,
}, nil) }, nil, nil)
require.NoError(t, err) require.NoError(t, err)
cases := map[string]struct { cases := map[string]struct {
@ -381,7 +381,7 @@ func TestRangeWithFailedCompactionWontGetSelected(t *testing.T) {
240, 240,
720, 720,
2160, 2160,
}, nil) }, nil, nil)
require.NoError(t, err) require.NoError(t, err)
cases := []struct { cases := []struct {
@ -431,7 +431,7 @@ func TestCompactionFailWillCleanUpTempDir(t *testing.T) {
240, 240,
720, 720,
2160, 2160,
}, nil) }, nil, nil)
require.NoError(t, err) require.NoError(t, err)
tmpdir, err := ioutil.TempDir("", "test") tmpdir, err := ioutil.TempDir("", "test")
@ -940,7 +940,7 @@ func TestCompaction_populateBlock(t *testing.T) {
blocks = append(blocks, &mockBReader{ir: ir, cr: cr, mint: mint, maxt: maxt}) blocks = append(blocks, &mockBReader{ir: ir, cr: cr, mint: mint, maxt: maxt})
} }
c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil) c, err := NewLeveledCompactor(context.Background(), nil, nil, []int64{0}, nil, nil)
require.NoError(t, err) require.NoError(t, err)
meta := &BlockMeta{ meta := &BlockMeta{
@ -1065,7 +1065,7 @@ func BenchmarkCompaction(b *testing.B) {
blockDirs = append(blockDirs, block.Dir()) blockDirs = append(blockDirs, block.Dir())
} }
c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil) c, err := NewLeveledCompactor(context.Background(), nil, log.NewNopLogger(), []int64{0}, nil, nil)
require.NoError(b, err) require.NoError(b, err)
b.ResetTimer() b.ResetTimer()

View File

@ -370,6 +370,7 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) {
db.logger, db.logger,
ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5), ExponentialBlockRanges(DefaultOptions().MinBlockDuration, 3, 5),
chunkenc.NewPool(), chunkenc.NewPool(),
nil,
) )
if err != nil { if err != nil {
return errors.Wrap(err, "create leveled compactor") return errors.Wrap(err, "create leveled compactor")
@ -648,7 +649,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
var err error var err error
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize) db.compactor, err = NewLeveledCompactorWithChunkSize(ctx, r, l, rngs, db.chunkPool, opts.MaxBlockChunkSegmentSize, nil)
if err != nil { if err != nil {
cancel() cancel()
return nil, errors.Wrap(err, "create leveled compactor") return nil, errors.Wrap(err, "create leveled compactor")

View File

@ -27,6 +27,7 @@ import (
"path/filepath" "path/filepath"
"sort" "sort"
"strconv" "strconv"
"strings"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -41,6 +42,7 @@ import (
"github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/fileutil" "github.com/prometheus/prometheus/tsdb/fileutil"
"github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/index"
@ -3122,3 +3124,259 @@ func TestNoPanicOnTSDBOpenError(t *testing.T) {
require.NoError(t, lockf.Release()) require.NoError(t, lockf.Release())
} }
func TestQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
t.Skip("TODO: investigate why process crash in CI")
const numRuns = 5
for i := 1; i <= numRuns; i++ {
t.Run(strconv.Itoa(i), func(t *testing.T) {
testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t)
})
}
}
func testQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
const (
numSeries = 1000
numStressIterations = 10000
minStressAllocationBytes = 128 * 1024
maxStressAllocationBytes = 512 * 1024
)
db := openTestDB(t, nil, nil)
defer func() {
require.NoError(t, db.Close())
}()
// Disable compactions so we can control them.
db.DisableCompactions()
// Generate the metrics we're going to append.
metrics := make([]labels.Labels, 0, numSeries)
for i := 0; i < numSeries; i++ {
metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", i)}})
}
// Push 1 sample every 15s for 2x the block duration period.
ctx := context.Background()
interval := int64(15 * time.Second / time.Millisecond)
ts := int64(0)
for ; ts < 2*DefaultBlockDuration; ts += interval {
app := db.Appender(ctx)
for _, metric := range metrics {
_, err := app.Append(0, metric, ts, float64(ts))
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Compact the TSDB head for the first time. We expect the head chunks file to have been cut.
require.NoError(t, db.Compact())
require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))
// Push more samples for another 1x block duration period.
for ; ts < 3*DefaultBlockDuration; ts += interval {
app := db.Appender(ctx)
for _, metric := range metrics {
_, err := app.Append(0, metric, ts, float64(ts))
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// At this point we expect 2 mmap-ed head chunks.
// Get a querier and make sure it's closed only once the test is over.
querier, err := db.Querier(ctx, 0, math.MaxInt64)
require.NoError(t, err)
defer func() {
require.NoError(t, querier.Close())
}()
// Query back all series.
hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
seriesSet := querier.Select(true, hints, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+"))
// Fetch samples iterators from all series.
var iterators []chunkenc.Iterator
actualSeries := 0
for seriesSet.Next() {
actualSeries++
// Get the iterator and call Next() so that we're sure the chunk is loaded.
it := seriesSet.At().Iterator()
it.Next()
it.At()
iterators = append(iterators, it)
}
require.NoError(t, seriesSet.Err())
require.Equal(t, actualSeries, numSeries)
// Compact the TSDB head again.
require.NoError(t, db.Compact())
require.Equal(t, float64(2), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))
// At this point we expect 1 head chunk to have been deleted.
// Stress the memory to trigger GC. This is required to increase the
// chances that the chunk memory area is released to the kernel.
var buf []byte
for i := 0; i < numStressIterations; i++ {
//nolint:staticcheck
buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...)
if i%1000 == 0 {
buf = nil
}
}
// Iterate the samples. We sum them just to make sure the Go compiler
// cannot optimize the loop away if the result of it.At() were discarded.
var sum float64
var firstErr error
for _, it := range iterators {
for it.Next() {
_, v := it.At()
sum += v
}
if err := it.Err(); err != nil {
firstErr = err
}
}
// After having iterated all samples we want to be sure that either no error
// occurred, or that the only error was "cannot populate chunk XXX: not found".
// This error can occur when the iterator tries to fetch a head chunk which
// has been offloaded by the head compaction in the meantime.
if firstErr != nil && !strings.Contains(firstErr.Error(), "cannot populate chunk") {
t.Fatalf("unexpected error: %s", firstErr.Error())
}
}
func TestChunkQuerier_ShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
t.Skip("TODO: investigate why process crash in CI")
const numRuns = 5
for i := 1; i <= numRuns; i++ {
t.Run(strconv.Itoa(i), func(t *testing.T) {
testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t)
})
}
}
func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChunks(t *testing.T) {
const (
numSeries = 1000
numStressIterations = 10000
minStressAllocationBytes = 128 * 1024
maxStressAllocationBytes = 512 * 1024
)
db := openTestDB(t, nil, nil)
defer func() {
require.NoError(t, db.Close())
}()
// Disable compactions so we can control them.
db.DisableCompactions()
// Generate the metrics we're going to append.
metrics := make([]labels.Labels, 0, numSeries)
for i := 0; i < numSeries; i++ {
metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: fmt.Sprintf("test_%d", i)}})
}
// Push 1 sample every 15s for 2x the block duration period.
ctx := context.Background()
interval := int64(15 * time.Second / time.Millisecond)
ts := int64(0)
for ; ts < 2*DefaultBlockDuration; ts += interval {
app := db.Appender(ctx)
for _, metric := range metrics {
_, err := app.Append(0, metric, ts, float64(ts))
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// Compact the TSDB head for the first time. We expect the head chunks file to have been cut.
require.NoError(t, db.Compact())
require.Equal(t, float64(1), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))
// Push more samples for another 1x block duration period.
for ; ts < 3*DefaultBlockDuration; ts += interval {
app := db.Appender(ctx)
for _, metric := range metrics {
_, err := app.Append(0, metric, ts, float64(ts))
require.NoError(t, err)
}
require.NoError(t, app.Commit())
}
// At this point we expect 2 mmap-ed head chunks.
// Get a querier and make sure it's closed only once the test is over.
querier, err := db.ChunkQuerier(ctx, 0, math.MaxInt64)
require.NoError(t, err)
defer func() {
require.NoError(t, querier.Close())
}()
// Query back all series.
hints := &storage.SelectHints{Start: 0, End: math.MaxInt64, Step: interval}
seriesSet := querier.Select(true, hints, labels.MustNewMatcher(labels.MatchRegexp, labels.MetricName, ".+"))
// Iterate all series and get their chunks.
var chunks []chunkenc.Chunk
actualSeries := 0
for seriesSet.Next() {
actualSeries++
for it := seriesSet.At().Iterator(); it.Next(); {
chunks = append(chunks, it.At().Chunk)
}
}
require.NoError(t, seriesSet.Err())
require.Equal(t, actualSeries, numSeries)
// Compact the TSDB head again.
require.NoError(t, db.Compact())
require.Equal(t, float64(2), prom_testutil.ToFloat64(db.Head().metrics.headTruncateTotal))
// At this point we expect 1 head chunk to have been deleted.
// Stress the memory to trigger GC. This is required to increase the
// chances that the chunk memory area is released to the kernel.
var buf []byte
for i := 0; i < numStressIterations; i++ {
//nolint:staticcheck
buf = append(buf, make([]byte, minStressAllocationBytes+rand.Int31n(maxStressAllocationBytes-minStressAllocationBytes))...)
if i%1000 == 0 {
buf = nil
}
}
// Iterate the chunks and read their byte slices. We compute the CRC32 just
// to force a full read of each byte slice; the result itself doesn't matter,
// we only need to read the data to make sure the memory backing the []byte
// is still valid.
chkCRC32 := newCRC32()
for _, chunk := range chunks {
chkCRC32.Reset()
_, err := chkCRC32.Write(chunk.Bytes())
require.NoError(t, err)
}
}

View File

@ -86,3 +86,35 @@ and specify an interval for which samples of a series got deleted.
│ . . . │ │ . . . │
└─────────────────────────────────────────────────────┘ └─────────────────────────────────────────────────────┘
``` ```
### Exemplar records
Exemplar records encode exemplars as a list of triples `(series_id, timestamp, value)`,
plus the length of the labels list and all the labels.
The first row stores the starting id and the starting timestamp.
Series references and timestamps are encoded as deltas w.r.t. the first exemplar.
The first exemplar itself begins at the second row.
See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
```
┌──────────────────────────────────────────────────────────────────┐
│ type = 4 <1b>
├──────────────────────────────────────────────────────────────────┤
│ ┌────────────────────┬───────────────────────────┐ │
│ │ id <8b> │ timestamp <8b> │ │
│ └────────────────────┴───────────────────────────┘ │
│ ┌────────────────────┬───────────────────────────┬─────────────┐ │
│ │ id_delta <uvarint> │ timestamp_delta <uvarint> │ value <8b> │ │
│ ├────────────────────┴───────────────────────────┴─────────────┤ │
│ │ n = len(labels) <uvarint> │ │
│ ├──────────────────────┬───────────────────────────────────────┤ │
│ │ len(str_1) <uvarint> │ str_1 <bytes> │ │
│ ├──────────────────────┴───────────────────────────────────────┤ │
│ │ ... │ │
│ ├───────────────────────┬──────────────────────────────────────┤ │
│ │ len(str_2n) <uvarint> │ str_2n <bytes>                       │ │
│ └───────────────────────┴──────────────────────────────────────┘ │
│ . . . │
└──────────────────────────────────────────────────────────────────┘
```
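A worked example of the delta encoding, with hypothetical values: for a first exemplar `(series_id=42, ts=1000)` followed by `(series_id=45, ts=1015)`, the header row stores `42` and `1000`, and the two exemplar rows store deltas `0/0` and `3/15`. The round-trip below is a sketch using the `record` package from this change (see the Encoder/Decoder hunks further down).
```
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/tsdb/record"
)

func main() {
	var (
		enc record.Encoder
		dec record.Decoder
	)
	exemplars := []record.RefExemplar{
		{Ref: 42, T: 1000, V: 1.5, Labels: labels.FromStrings("traceID", "abc")},
		{Ref: 45, T: 1015, V: 2.5, Labels: labels.FromStrings("traceID", "def")},
	}
	rec := enc.Exemplars(exemplars, nil) // rec[0] is the Exemplars type byte
	decoded, err := dec.Exemplars(rec, nil)
	fmt.Println(decoded, err) // round-trips to the input, err == nil
}
```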

View File

@ -17,6 +17,7 @@ import (
"context" "context"
"sort" "sort"
"sync" "sync"
"unicode/utf8"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/exemplar"
@ -43,12 +44,13 @@ type CircularExemplarStorage struct {
type indexEntry struct { type indexEntry struct {
oldest int oldest int
newest int newest int
seriesLabels labels.Labels
} }
type circularBufferEntry struct { type circularBufferEntry struct {
exemplar exemplar.Exemplar exemplar exemplar.Exemplar
seriesLabels labels.Labels
next int next int
ref *indexEntry
} }
// NewCircularExemplarStorage creates an circular in memory exemplar storage. // NewCircularExemplarStorage creates an circular in memory exemplar storage.
@ -82,7 +84,7 @@ func NewCircularExemplarStorage(len int, reg prometheus.Registerer) (ExemplarSto
}), }),
outOfOrderExemplars: prometheus.NewCounter(prometheus.CounterOpts{ outOfOrderExemplars: prometheus.NewCounter(prometheus.CounterOpts{
Name: "prometheus_tsdb_exemplar_out_of_order_exemplars_total", Name: "prometheus_tsdb_exemplar_out_of_order_exemplars_total",
Help: "Total number of out of order exemplar ingestion failed attempts", Help: "Total number of out of order exemplar ingestion failed attempts.",
}), }),
} }
if reg != nil { if reg != nil {
@ -121,10 +123,13 @@ func (ce *CircularExemplarStorage) Select(start, end int64, matchers ...[]*label
for _, idx := range ce.index { for _, idx := range ce.index {
var se exemplar.QueryResult var se exemplar.QueryResult
e := ce.exemplars[idx.oldest] e := ce.exemplars[idx.oldest]
if !matchesSomeMatcherSet(e.seriesLabels, matchers) { if e.exemplar.Ts > end || ce.exemplars[idx.newest].exemplar.Ts < start {
continue continue
} }
se.SeriesLabels = e.seriesLabels if !matchesSomeMatcherSet(idx.seriesLabels, matchers) {
continue
}
se.SeriesLabels = idx.seriesLabels
// Loop through all exemplars in the circular buffer for the current series. // Loop through all exemplars in the circular buffer for the current series.
for e.exemplar.Ts <= end { for e.exemplar.Ts <= end {
@ -161,6 +166,51 @@ Outer:
return false return false
} }
func (ce *CircularExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error {
seriesLabels := l.String()
// TODO(bwplotka): This lock can lock all scrapers, there might be high contention on this at scale.
// Optimize by moving the lock to be per series (& benchmark it).
ce.lock.RLock()
defer ce.lock.RUnlock()
return ce.validateExemplar(seriesLabels, e, false)
}
// Not thread safe. The append parameter tells us whether this is an external validation or an internal
// one as a result of an AddExemplar call, in which case we should update any relevant metrics.
func (ce *CircularExemplarStorage) validateExemplar(l string, e exemplar.Exemplar, append bool) error {
// Exemplar label length does not include chars involved in text rendering such as quotes,
// equals sign, or commas. See the definition of const ExemplarMaxLabelSetLength.
labelSetLen := 0
for _, l := range e.Labels {
labelSetLen += utf8.RuneCountInString(l.Name)
labelSetLen += utf8.RuneCountInString(l.Value)
if labelSetLen > exemplar.ExemplarMaxLabelSetLength {
return storage.ErrExemplarLabelLength
}
}
idx, ok := ce.index[l]
if !ok {
return nil
}
// Check for duplicate vs last stored exemplar for this series.
// NB these are expected, and appending them is a no-op.
if ce.exemplars[idx.newest].exemplar.Equals(e) {
return storage.ErrDuplicateExemplar
}
if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts {
if append {
ce.outOfOrderExemplars.Inc()
}
return storage.ErrOutOfOrderExemplar
}
return nil
}
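A usage sketch of the validate-then-add flow, mirroring how `headAppender.AppendExemplar` uses it further down: duplicates are tolerated as silent no-ops, while any other validation error is surfaced before the exemplar is queued (`validateThenAdd` is a hypothetical helper, not part of this change).
```
package exemplarflow

import (
	"github.com/prometheus/prometheus/pkg/exemplar"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// validateThenAdd validates first so callers can treat duplicates as a
// no-op and only pay for AddExemplar when the exemplar is new.
func validateThenAdd(es tsdb.ExemplarStorage, lset labels.Labels, ex exemplar.Exemplar) error {
	if err := es.ValidateExemplar(lset, ex); err != nil {
		if err == storage.ErrDuplicateExemplar {
			// Duplicates are expected; appending them is a no-op.
			return nil
		}
		return err
	}
	return es.AddExemplar(lset, ex)
}
```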
func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error { func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) error {
seriesLabels := l.String() seriesLabels := l.String()
@ -169,21 +219,19 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
ce.lock.Lock() ce.lock.Lock()
defer ce.lock.Unlock() defer ce.lock.Unlock()
idx, ok := ce.index[seriesLabels] err := ce.validateExemplar(seriesLabels, e, true)
if !ok { if err != nil {
ce.index[seriesLabels] = &indexEntry{oldest: ce.nextIndex} if err == storage.ErrDuplicateExemplar {
} else { // Duplicate exemplar, noop.
// Check for duplicate vs last stored exemplar for this series.
// NB these are expected, add appending them is a no-op.
if ce.exemplars[idx.newest].exemplar.Equals(e) {
return nil return nil
} }
return err
if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts {
ce.outOfOrderExemplars.Inc()
return storage.ErrOutOfOrderExemplar
} }
_, ok := ce.index[seriesLabels]
if !ok {
ce.index[seriesLabels] = &indexEntry{oldest: ce.nextIndex, seriesLabels: l}
} else {
ce.exemplars[ce.index[seriesLabels].newest].next = ce.nextIndex ce.exemplars[ce.index[seriesLabels].newest].next = ce.nextIndex
} }
@ -192,7 +240,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
} else { } else {
// There exists exemplar already on this ce.nextIndex entry, drop it, to make place // There exists exemplar already on this ce.nextIndex entry, drop it, to make place
// for others. // for others.
prevLabels := prev.seriesLabels.String() prevLabels := prev.ref.seriesLabels.String()
if prev.next == -1 { if prev.next == -1 {
// Last item for this series, remove index entry. // Last item for this series, remove index entry.
delete(ce.index, prevLabels) delete(ce.index, prevLabels)
@ -205,7 +253,7 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
// since this is the first exemplar stored for this series. // since this is the first exemplar stored for this series.
ce.exemplars[ce.nextIndex].exemplar = e ce.exemplars[ce.nextIndex].exemplar = e
ce.exemplars[ce.nextIndex].next = -1 ce.exemplars[ce.nextIndex].next = -1
ce.exemplars[ce.nextIndex].seriesLabels = l ce.exemplars[ce.nextIndex].ref = ce.index[seriesLabels]
ce.index[seriesLabels].newest = ce.nextIndex ce.index[seriesLabels].newest = ce.nextIndex
ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars) ce.nextIndex = (ce.nextIndex + 1) % len(ce.exemplars)
@ -214,13 +262,13 @@ func (ce *CircularExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemp
ce.seriesWithExemplarsInStorage.Set(float64(len(ce.index))) ce.seriesWithExemplarsInStorage.Set(float64(len(ce.index)))
if next := ce.exemplars[ce.nextIndex]; next != nil { if next := ce.exemplars[ce.nextIndex]; next != nil {
ce.exemplarsInStorage.Set(float64(len(ce.exemplars))) ce.exemplarsInStorage.Set(float64(len(ce.exemplars)))
ce.lastExemplarsTs.Set(float64(next.exemplar.Ts)) ce.lastExemplarsTs.Set(float64(next.exemplar.Ts) / 1000)
return nil return nil
} }
// We did not yet fill the buffer. // We did not yet fill the buffer.
ce.exemplarsInStorage.Set(float64(ce.nextIndex)) ce.exemplarsInStorage.Set(float64(ce.nextIndex))
ce.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts)) ce.lastExemplarsTs.Set(float64(ce.exemplars[0].exemplar.Ts) / 1000)
return nil return nil
} }
@ -230,6 +278,10 @@ func (noopExemplarStorage) AddExemplar(l labels.Labels, e exemplar.Exemplar) err
return nil return nil
} }
func (noopExemplarStorage) ValidateExemplar(l labels.Labels, e exemplar.Exemplar) error {
return nil
}
func (noopExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) { func (noopExemplarStorage) ExemplarQuerier(context.Context) (storage.ExemplarQuerier, error) {
return &noopExemplarQuerier{}, nil return &noopExemplarQuerier{}, nil
} }

View File

@ -16,6 +16,7 @@ package tsdb
import ( import (
"reflect" "reflect"
"strconv" "strconv"
"strings"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -25,6 +26,66 @@ import (
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
// Tests the same exemplar cases as AddExemplar, but specifically exercises the ValidateExemplar function so it can be relied on externally.
func TestValidateExemplar(t *testing.T) {
exs, err := NewCircularExemplarStorage(2, nil)
require.NoError(t, err)
es := exs.(*CircularExemplarStorage)
l := labels.Labels{
{Name: "service", Value: "asdf"},
}
e := exemplar.Exemplar{
Labels: labels.Labels{
labels.Label{
Name: "traceID",
Value: "qwerty",
},
},
Value: 0.1,
Ts: 1,
}
require.NoError(t, es.ValidateExemplar(l, e))
require.NoError(t, es.AddExemplar(l, e))
e2 := exemplar.Exemplar{
Labels: labels.Labels{
labels.Label{
Name: "traceID",
Value: "zxcvb",
},
},
Value: 0.1,
Ts: 2,
}
require.NoError(t, es.ValidateExemplar(l, e2))
require.NoError(t, es.AddExemplar(l, e2))
require.Equal(t, es.ValidateExemplar(l, e2), storage.ErrDuplicateExemplar, "error is expected attempting to validate duplicate exemplar")
e3 := e2
e3.Ts = 3
require.Equal(t, es.ValidateExemplar(l, e3), storage.ErrDuplicateExemplar, "error is expected when attempting to validate duplicate exemplar, even with different timestamp")
e3.Ts = 1
e3.Value = 0.3
require.Equal(t, es.ValidateExemplar(l, e3), storage.ErrOutOfOrderExemplar)
e4 := exemplar.Exemplar{
Labels: labels.Labels{
labels.Label{
Name: "a",
Value: strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength),
},
},
Value: 0.1,
Ts: 2,
}
require.Equal(t, storage.ErrExemplarLabelLength, es.ValidateExemplar(l, e4))
}
func TestAddExemplar(t *testing.T) { func TestAddExemplar(t *testing.T) {
exs, err := NewCircularExemplarStorage(2, nil) exs, err := NewCircularExemplarStorage(2, nil)
require.NoError(t, err) require.NoError(t, err)
@ -44,8 +105,7 @@ func TestAddExemplar(t *testing.T) {
Ts: 1, Ts: 1,
} }
err = es.AddExemplar(l, e) require.NoError(t, es.AddExemplar(l, e))
require.NoError(t, err)
require.Equal(t, es.index[l.String()].newest, 0, "exemplar was not stored correctly") require.Equal(t, es.index[l.String()].newest, 0, "exemplar was not stored correctly")
e2 := exemplar.Exemplar{ e2 := exemplar.Exemplar{
@ -59,23 +119,31 @@ func TestAddExemplar(t *testing.T) {
Ts: 2, Ts: 2,
} }
err = es.AddExemplar(l, e2) require.NoError(t, es.AddExemplar(l, e2))
require.NoError(t, err)
require.Equal(t, es.index[l.String()].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update") require.Equal(t, es.index[l.String()].newest, 1, "exemplar was not stored correctly, location of newest exemplar for series in index did not update")
require.True(t, es.exemplars[es.index[l.String()].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[l.String()].newest].exemplar) require.True(t, es.exemplars[es.index[l.String()].newest].exemplar.Equals(e2), "exemplar was not stored correctly, expected %+v got: %+v", e2, es.exemplars[es.index[l.String()].newest].exemplar)
err = es.AddExemplar(l, e2) require.NoError(t, es.AddExemplar(l, e2), "no error is expected attempting to add duplicate exemplar")
require.NoError(t, err, "no error is expected attempting to add duplicate exemplar")
e3 := e2 e3 := e2
e3.Ts = 3 e3.Ts = 3
err = es.AddExemplar(l, e3) require.NoError(t, es.AddExemplar(l, e3), "no error is expected when attempting to add duplicate exemplar, even with different timestamp")
require.NoError(t, err, "no error is expected when attempting to add duplicate exemplar, even with different timestamp")
e3.Ts = 1 e3.Ts = 1
e3.Value = 0.3 e3.Value = 0.3
err = es.AddExemplar(l, e3) require.Equal(t, storage.ErrOutOfOrderExemplar, es.AddExemplar(l, e3))
require.Equal(t, err, storage.ErrOutOfOrderExemplar)
e4 := exemplar.Exemplar{
Labels: labels.Labels{
labels.Label{
Name: "a",
Value: strings.Repeat("b", exemplar.ExemplarMaxLabelSetLength),
},
},
Value: 0.1,
Ts: 2,
}
require.Equal(t, storage.ErrExemplarLabelLength, es.AddExemplar(l, e4))
} }
func TestStorageOverflow(t *testing.T) { func TestStorageOverflow(t *testing.T) {
@ -304,11 +372,11 @@ func TestIndexOverwrite(t *testing.T) {
// index entry for series l1 since we just wrote two exemplars for series l2. // index entry for series l1 since we just wrote two exemplars for series l2.
_, ok := es.index[l1.String()] _, ok := es.index[l1.String()]
require.False(t, ok) require.False(t, ok)
require.Equal(t, &indexEntry{1, 0}, es.index[l2.String()]) require.Equal(t, &indexEntry{1, 0, l2}, es.index[l2.String()])
err = es.AddExemplar(l1, exemplar.Exemplar{Value: 4, Ts: 4}) err = es.AddExemplar(l1, exemplar.Exemplar{Value: 4, Ts: 4})
require.NoError(t, err) require.NoError(t, err)
i := es.index[l2.String()] i := es.index[l2.String()]
require.Equal(t, &indexEntry{0, 0}, i) require.Equal(t, &indexEntry{0, 0, l2}, i)
} }

View File

@ -58,6 +58,7 @@ var (
type ExemplarStorage interface { type ExemplarStorage interface {
storage.ExemplarQueryable storage.ExemplarQueryable
AddExemplar(labels.Labels, exemplar.Exemplar) error AddExemplar(labels.Labels, exemplar.Exemplar) error
ValidateExemplar(labels.Labels, exemplar.Exemplar) error
} }
// Head handles reads and writes of time series data within a time window. // Head handles reads and writes of time series data within a time window.
@ -459,6 +460,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
// Track number of samples that referenced a series we don't know about // Track number of samples that referenced a series we don't know about
// for error reporting. // for error reporting.
var unknownRefs atomic.Uint64 var unknownRefs atomic.Uint64
var unknownExemplarRefs atomic.Uint64
// Start workers that each process samples for a partition of the series ID space. // Start workers that each process samples for a partition of the series ID space.
// They are connected through a ring of channels which ensures that all sample batches // They are connected through a ring of channels which ensures that all sample batches
@ -468,6 +470,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
n = runtime.GOMAXPROCS(0) n = runtime.GOMAXPROCS(0)
inputs = make([]chan []record.RefSample, n) inputs = make([]chan []record.RefSample, n)
outputs = make([]chan []record.RefSample, n) outputs = make([]chan []record.RefSample, n)
exemplarsInput chan record.RefExemplar
dec record.Decoder dec record.Decoder
shards = make([][]record.RefSample, n) shards = make([][]record.RefSample, n)
@ -489,6 +492,11 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
return []tombstones.Stone{} return []tombstones.Stone{}
}, },
} }
exemplarsPool = sync.Pool{
New: func() interface{} {
return []record.RefExemplar{}
},
}
) )
defer func() { defer func() {
@ -500,6 +508,7 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
for range outputs[i] { for range outputs[i] {
} }
} }
close(exemplarsInput)
wg.Wait() wg.Wait()
} }
}() }()
@ -516,6 +525,29 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
}(inputs[i], outputs[i]) }(inputs[i], outputs[i])
} }
wg.Add(1)
exemplarsInput = make(chan record.RefExemplar, 300)
go func(input <-chan record.RefExemplar) {
defer wg.Done()
for e := range input {
ms := h.series.getByID(e.Ref)
if ms == nil {
unknownExemplarRefs.Inc()
continue
}
if e.T < h.minValidTime.Load() {
continue
}
// At the moment the only possible error here is an out of order exemplar, which we shouldn't see when
// replaying the WAL, so log a warning if we do see one.
err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
if err != nil && err == storage.ErrOutOfOrderExemplar {
level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err)
}
}
}(exemplarsInput)
go func() { go func() {
defer close(decoded) defer close(decoded)
for r.Next() { for r.Next() {
@ -557,6 +589,18 @@ func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks
return return
} }
decoded <- tstones decoded <- tstones
case record.Exemplars:
exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0]
exemplars, err = dec.Exemplars(rec, exemplars)
if err != nil {
decodeErr = &wal.CorruptionErr{
Err: errors.Wrap(err, "decode exemplars"),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decoded <- exemplars
default: default:
// Noop. // Noop.
} }
@ -646,6 +690,12 @@ Outer:
} }
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification. //nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
tstonesPool.Put(v) tstonesPool.Put(v)
case []record.RefExemplar:
for _, e := range v {
exemplarsInput <- e
}
//nolint:staticcheck // Ignore SA6002 relax staticcheck verification.
exemplarsPool.Put(v)
default: default:
panic(fmt.Errorf("unexpected decoded type: %T", d)) panic(fmt.Errorf("unexpected decoded type: %T", d))
} }
@ -667,14 +717,15 @@ Outer:
for range outputs[i] { for range outputs[i] {
} }
} }
close(exemplarsInput)
wg.Wait() wg.Wait()
if r.Err() != nil { if r.Err() != nil {
return errors.Wrap(r.Err(), "read records") return errors.Wrap(r.Err(), "read records")
} }
if unknownRefs.Load() > 0 { if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 {
level.Warn(h.logger).Log("msg", "Unknown series references", "count", unknownRefs.Load()) level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load())
} }
return nil return nil
} }
@ -1339,6 +1390,15 @@ func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Ex
// Ensure no empty labels have gotten through. // Ensure no empty labels have gotten through.
e.Labels = e.Labels.WithoutEmpty() e.Labels = e.Labels.WithoutEmpty()
err := a.exemplarAppender.ValidateExemplar(s.lset, e)
if err != nil {
if err == storage.ErrDuplicateExemplar {
// Duplicate, don't return an error but don't accept the exemplar.
return 0, nil
}
return 0, err
}
a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e}) a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e})
return s.ref, nil return s.ref, nil
@ -1382,14 +1442,36 @@ func (a *headAppender) log() error {
return errors.Wrap(err, "log samples") return errors.Wrap(err, "log samples")
} }
} }
if len(a.exemplars) > 0 {
rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf)
buf = rec[:0]
if err := a.head.wal.Log(rec); err != nil {
return errors.Wrap(err, "log exemplars")
}
}
return nil return nil
} }
func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar {
ret := make([]record.RefExemplar, 0, len(es))
for _, e := range es {
ret = append(ret, record.RefExemplar{
Ref: e.ref,
T: e.exemplar.Ts,
V: e.exemplar.Value,
Labels: e.exemplar.Labels,
})
}
return ret
}
func (a *headAppender) Commit() (err error) { func (a *headAppender) Commit() (err error) {
if a.closed { if a.closed {
return ErrAppenderClosed return ErrAppenderClosed
} }
defer func() { a.closed = true }() defer func() { a.closed = true }()
if err := a.log(); err != nil { if err := a.log(); err != nil {
_ = a.Rollback() // Most likely the same error will happen again. _ = a.Rollback() // Most likely the same error will happen again.
return errors.Wrap(err, "write to WAL") return errors.Wrap(err, "write to WAL")
@ -1404,7 +1486,6 @@ func (a *headAppender) Commit() (err error) {
continue continue
} }
level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err)
continue
} }
} }
@ -1458,7 +1539,9 @@ func (a *headAppender) Rollback() (err error) {
series.Unlock() series.Unlock()
} }
a.head.putAppendBuffer(a.samples) a.head.putAppendBuffer(a.samples)
a.head.putExemplarBuffer(a.exemplars)
a.samples = nil a.samples = nil
a.exemplars = nil
// Series are created in the head memory regardless of rollback. Thus we have // Series are created in the head memory regardless of rollback. Thus we have
// to log them to the WAL in any case. // to log them to the WAL in any case.

View File

@ -51,6 +51,7 @@ func newTestHead(t testing.TB, chunkRange int64, compressWAL bool) (*Head, *wal.
opts := DefaultHeadOptions() opts := DefaultHeadOptions()
opts.ChunkRange = chunkRange opts.ChunkRange = chunkRange
opts.ChunkDirRoot = dir opts.ChunkDirRoot = dir
opts.NumExemplars = 10
h, err := NewHead(nil, nil, wlog, opts) h, err := NewHead(nil, nil, wlog, opts)
require.NoError(t, err) require.NoError(t, err)
@ -87,6 +88,8 @@ func populateTestWAL(t testing.TB, w *wal.WAL, recs []interface{}) {
require.NoError(t, w.Log(enc.Samples(v, nil))) require.NoError(t, w.Log(enc.Samples(v, nil)))
case []tombstones.Stone: case []tombstones.Stone:
require.NoError(t, w.Log(enc.Tombstones(v, nil))) require.NoError(t, w.Log(enc.Tombstones(v, nil)))
case []record.RefExemplar:
require.NoError(t, w.Log(enc.Exemplars(v, nil)))
} }
} }
} }
@ -148,8 +151,20 @@ func BenchmarkLoadWAL(b *testing.B) {
} }
labelsPerSeries := 5 labelsPerSeries := 5
// Rough estimates of the most common percentages of samples that have an exemplar for each scrape.
exemplarsPercentages := []float64{0, 0.5, 1, 5}
lastExemplarsPerSeries := -1
for _, c := range cases { for _, c := range cases {
b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries), for _, p := range exemplarsPercentages {
exemplarsPerSeries := int(math.RoundToEven(float64(c.samplesPerSeries) * p / 100))
// For tests with low samplesPerSeries we could end up testing with 0 exemplarsPerSeries
// multiple times without this check.
if exemplarsPerSeries == lastExemplarsPerSeries {
continue
}
lastExemplarsPerSeries = exemplarsPerSeries
b.Run(fmt.Sprintf("batches=%d,seriesPerBatch=%d,samplesPerSeries=%d,exemplarsPerSeries=%d", c.batches, c.seriesPerBatch, c.samplesPerSeries, exemplarsPerSeries),
func(b *testing.B) { func(b *testing.B) {
dir, err := ioutil.TempDir("", "test_load_wal") dir, err := ioutil.TempDir("", "test_load_wal")
require.NoError(b, err) require.NoError(b, err)
@ -191,6 +206,23 @@ func BenchmarkLoadWAL(b *testing.B) {
} }
} }
// Write exemplars.
refExemplars := make([]record.RefExemplar, 0, c.seriesPerBatch)
for i := 0; i < exemplarsPerSeries; i++ {
for j := 0; j < c.batches; j++ {
refExemplars = refExemplars[:0]
for k := j * c.seriesPerBatch; k < (j+1)*c.seriesPerBatch; k++ {
refExemplars = append(refExemplars, record.RefExemplar{
Ref: uint64(k) * 100,
T: int64(i) * 10,
V: float64(i) * 100,
Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i)),
})
}
populateTestWAL(b, w, []interface{}{refExemplars})
}
}
b.ResetTimer() b.ResetTimer()
// Load the WAL. // Load the WAL.
@ -204,6 +236,7 @@ func BenchmarkLoadWAL(b *testing.B) {
} }
}) })
} }
}
} }
func TestHead_ReadWAL(t *testing.T) { func TestHead_ReadWAL(t *testing.T) {
@ -233,6 +266,9 @@ func TestHead_ReadWAL(t *testing.T) {
[]tombstones.Stone{ []tombstones.Stone{
{Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}}, {Ref: 0, Intervals: []tombstones.Interval{{Mint: 99, Maxt: 101}}},
}, },
[]record.RefExemplar{
{Ref: 10, T: 100, V: 1, Labels: labels.FromStrings("traceID", "asdf")},
},
} }
head, w := newTestHead(t, 1000, compress) head, w := newTestHead(t, 1000, compress)
@ -266,6 +302,12 @@ func TestHead_ReadWAL(t *testing.T) {
require.Equal(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil))) require.Equal(t, []sample{{100, 2}, {101, 5}}, expandChunk(s10.iterator(0, nil, head.chunkDiskMapper, nil)))
require.Equal(t, []sample{{101, 6}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil))) require.Equal(t, []sample{{101, 6}}, expandChunk(s50.iterator(0, nil, head.chunkDiskMapper, nil)))
require.Equal(t, []sample{{100, 3}, {101, 7}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil))) require.Equal(t, []sample{{100, 3}, {101, 7}}, expandChunk(s100.iterator(0, nil, head.chunkDiskMapper, nil)))
q, err := head.ExemplarQuerier(context.Background())
require.NoError(t, err)
e, err := q.Select(0, 1000, []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "a", "1")})
require.NoError(t, err)
require.Equal(t, e[0].Exemplars[0], exemplar.Exemplar{Ts: 100, Value: 1, Labels: labels.FromStrings("traceID", "asdf")})
}) })
} }
} }
@ -2000,7 +2042,7 @@ func TestHeadMintAfterTruncation(t *testing.T) {
require.Equal(t, int64(4000), head.MinTime()) require.Equal(t, int64(4000), head.MinTime())
require.Equal(t, int64(4000), head.minValidTime.Load()) require.Equal(t, int64(4000), head.minValidTime.Load())
// After truncation outside the appendable windown if the actual min time // After truncation outside the appendable window if the actual min time
// is in the appendable window then we should leave mint at the start of appendable window. // is in the appendable window then we should leave mint at the start of appendable window.
require.NoError(t, head.Truncate(5000)) require.NoError(t, head.Truncate(5000))
require.Equal(t, head.appendableMinValidTime(), head.MinTime()) require.Equal(t, head.appendableMinValidTime(), head.MinTime())

View File

@ -217,7 +217,7 @@ func TestIndexRW_Postings(t *testing.T) {
} }
require.NoError(t, p.Err()) require.NoError(t, p.Err())
// The label incides are no longer used, so test them by hand here. // The label indices are no longer used, so test them by hand here.
labelIndices := map[string][]string{} labelIndices := map[string][]string{}
require.NoError(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error { require.NoError(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error {
if len(key) != 1 { if len(key) != 1 {

View File

@ -37,6 +37,8 @@ const (
Samples Type = 2 Samples Type = 2
// Tombstones is used to match WAL records of type Tombstones. // Tombstones is used to match WAL records of type Tombstones.
Tombstones Type = 3 Tombstones Type = 3
// Exemplars is used to match WAL records of type Exemplars.
Exemplars Type = 4
) )
var ( var (
@ -57,6 +59,14 @@ type RefSample struct {
V float64 V float64
} }
// RefExemplar is an exemplar with its labels, the timestamp and value it was collected/observed with, and a reference to a series.
type RefExemplar struct {
Ref uint64
T int64
V float64
Labels labels.Labels
}
// Decoder decodes series, sample, and tombstone records. // Decoder decodes series, sample, and tombstone records.
// The zero value is ready to use. // The zero value is ready to use.
type Decoder struct { type Decoder struct {
@ -69,7 +79,7 @@ func (d *Decoder) Type(rec []byte) Type {
return Unknown return Unknown
} }
switch t := Type(rec[0]); t { switch t := Type(rec[0]); t {
case Series, Samples, Tombstones: case Series, Samples, Tombstones, Exemplars:
return t return t
} }
return Unknown return Unknown
@ -166,6 +176,48 @@ func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombston
return tstones, nil return tstones, nil
} }
func (d *Decoder) Exemplars(rec []byte, exemplars []RefExemplar) ([]RefExemplar, error) {
dec := encoding.Decbuf{B: rec}
t := Type(dec.Byte())
if t != Exemplars {
return nil, errors.New("invalid record type")
}
if dec.Len() == 0 {
return exemplars, nil
}
var (
baseRef = dec.Be64()
baseTime = dec.Be64int64()
)
for len(dec.B) > 0 && dec.Err() == nil {
dref := dec.Varint64()
dtime := dec.Varint64()
val := dec.Be64()
lset := make(labels.Labels, dec.Uvarint())
for i := range lset {
lset[i].Name = dec.UvarintStr()
lset[i].Value = dec.UvarintStr()
}
sort.Sort(lset)
exemplars = append(exemplars, RefExemplar{
Ref: baseRef + uint64(dref),
T: baseTime + dtime,
V: math.Float64frombits(val),
Labels: lset,
})
}
if dec.Err() != nil {
return nil, errors.Wrapf(dec.Err(), "decode error after %d exemplars", len(exemplars))
}
if len(dec.B) > 0 {
return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
}
return exemplars, nil
}
// Encoder encodes series, sample, and tombstones records. // Encoder encodes series, sample, and tombstones records.
// The zero value is ready to use. // The zero value is ready to use.
type Encoder struct { type Encoder struct {
@ -226,3 +278,33 @@ func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
} }
return buf.Get() return buf.Get()
} }
func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte {
buf := encoding.Encbuf{B: b}
buf.PutByte(byte(Exemplars))
if len(exemplars) == 0 {
return buf.Get()
}
// Store the base timestamp and base reference number of the first exemplar.
// All exemplars encode their timestamp and ref as deltas to those.
first := exemplars[0]
buf.PutBE64(first.Ref)
buf.PutBE64int64(first.T)
for _, ex := range exemplars {
buf.PutVarint64(int64(ex.Ref) - int64(first.Ref))
buf.PutVarint64(ex.T - first.T)
buf.PutBE64(math.Float64bits(ex.V))
buf.PutUvarint(len(ex.Labels))
for _, l := range ex.Labels {
buf.PutUvarintStr(l.Name)
buf.PutUvarintStr(l.Value)
}
}
return buf.Get()
}

View File

@ -74,6 +74,15 @@ func TestRecord_EncodeDecode(t *testing.T) {
{Ref: 13, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: -11}}}, {Ref: 13, Intervals: tombstones.Intervals{{Mint: -1000, Maxt: -11}}},
{Ref: 13, Intervals: tombstones.Intervals{{Mint: 5000, Maxt: 1000}}}, {Ref: 13, Intervals: tombstones.Intervals{{Mint: 5000, Maxt: 1000}}},
}, decTstones) }, decTstones)
exemplars := []RefExemplar{
{Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("traceID", "qwerty")},
{Ref: 123, T: -1231, V: -123, Labels: labels.FromStrings("traceID", "asdf")},
{Ref: 2, T: 0, V: 99999, Labels: labels.FromStrings("traceID", "zxcv")},
}
decExemplars, err := dec.Exemplars(enc.Exemplars(exemplars, nil), nil)
require.NoError(t, err)
require.Equal(t, exemplars, decExemplars)
} }
// TestRecord_Corrupted ensures that corrupted records return the correct error. // TestRecord_Corrupted ensures that corrupted records return the correct error.
@ -117,6 +126,16 @@ func TestRecord_Corrupted(t *testing.T) {
_, err := dec.Tombstones(corrupted, nil) _, err := dec.Tombstones(corrupted, nil)
require.Equal(t, err, encoding.ErrInvalidSize) require.Equal(t, err, encoding.ErrInvalidSize)
}) })
t.Run("Test corrupted exemplar record", func(t *testing.T) {
exemplars := []RefExemplar{
{Ref: 0, T: 12423423, V: 1.2345, Labels: labels.FromStrings("traceID", "asdf")},
}
corrupted := enc.Exemplars(exemplars, nil)[:8]
_, err := dec.Exemplars(corrupted, nil)
require.Equal(t, errors.Cause(err), encoding.ErrInvalidSize)
})
} }
func TestRecord_Type(t *testing.T) { func TestRecord_Type(t *testing.T) {

View File

@ -65,3 +65,15 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
} }
return ChunkFromSamples(samples) return ChunkFromSamples(samples)
} }
// GenerateSamples generates numSamples samples, starting at start and counting up by one.
func GenerateSamples(start int, numSamples int) []Sample {
samples := make([]Sample, 0, numSamples)
for i := start; i < start+numSamples; i++ {
samples = append(samples, sample{
t: int64(i),
v: float64(i),
})
}
return samples
}

View File

@ -64,7 +64,7 @@ type walMetrics struct {
corruptions prometheus.Counter corruptions prometheus.Counter
} }
func newWalMetrics(wal *SegmentWAL, r prometheus.Registerer) *walMetrics { func newWalMetrics(r prometheus.Registerer) *walMetrics {
m := &walMetrics{} m := &walMetrics{}
m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
@ -192,7 +192,7 @@ func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration,
segmentSize: walSegmentSizeBytes, segmentSize: walSegmentSizeBytes,
crc32: newCRC32(), crc32: newCRC32(),
} }
w.metrics = newWalMetrics(w, r) w.metrics = newWalMetrics(r)
fns, err := sequenceFiles(w.dirFile.Name()) fns, err := sequenceFiles(w.dirFile.Name())
if err != nil { if err != nil {

View File

@ -40,9 +40,11 @@ type CheckpointStats struct {
DroppedSeries int DroppedSeries int
DroppedSamples int DroppedSamples int
DroppedTombstones int DroppedTombstones int
DroppedExemplars int
TotalSeries int // Processed series including dropped ones. TotalSeries int // Processed series including dropped ones.
TotalSamples int // Processed samples including dropped ones. TotalSamples int // Processed samples including dropped ones.
TotalTombstones int // Processed tombstones including dropped ones. TotalTombstones int // Processed tombstones including dropped ones.
TotalExemplars int // Processed exemplars including dropped ones.
} }
// LastCheckpoint returns the directory name and index of the most recent checkpoint. // LastCheckpoint returns the directory name and index of the most recent checkpoint.
@ -147,13 +149,14 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo
series []record.RefSeries series []record.RefSeries
samples []record.RefSample samples []record.RefSample
tstones []tombstones.Stone tstones []tombstones.Stone
exemplars []record.RefExemplar
dec record.Decoder dec record.Decoder
enc record.Encoder enc record.Encoder
buf []byte buf []byte
recs [][]byte recs [][]byte
) )
for r.Next() { for r.Next() {
series, samples, tstones = series[:0], samples[:0], tstones[:0] series, samples, tstones, exemplars = series[:0], samples[:0], tstones[:0], exemplars[:0]
// We don't reset the buffer since we batch up multiple records // We don't reset the buffer since we batch up multiple records
// before writing them to the checkpoint. // before writing them to the checkpoint.
@ -219,6 +222,23 @@ func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id uint64) bo
stats.TotalTombstones += len(tstones) stats.TotalTombstones += len(tstones)
stats.DroppedTombstones += len(tstones) - len(repl) stats.DroppedTombstones += len(tstones) - len(repl)
case record.Exemplars:
exemplars, err = dec.Exemplars(rec, exemplars)
if err != nil {
return nil, errors.Wrap(err, "decode exemplars")
}
// Drop irrelevant exemplars in place.
repl := exemplars[:0]
for _, e := range exemplars {
if e.T >= mint {
repl = append(repl, e)
}
}
if len(repl) > 0 {
buf = enc.Exemplars(repl, buf)
}
stats.TotalExemplars += len(exemplars)
stats.DroppedExemplars += len(exemplars) - len(repl)
default: default:
// Unknown record type, probably from a future Prometheus version. // Unknown record type, probably from a future Prometheus version.
continue continue
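The `repl := exemplars[:0]` filter above is the standard zero-allocation in-place idiom: it reuses the decoded slice's backing array while dropping entries older than `mint`. A standalone sketch, with a simplified stand-in type:
```
package main

import "fmt"

type refExemplar struct{ T int64 } // simplified stand-in for record.RefExemplar

// keepAtOrAfter filters in place by appending survivors into the same
// backing array, exactly like the checkpoint code above.
func keepAtOrAfter(es []refExemplar, mint int64) []refExemplar {
	kept := es[:0]
	for _, e := range es {
		if e.T >= mint {
			kept = append(kept, e)
		}
	}
	return kept
}

func main() {
	es := []refExemplar{{T: 5}, {T: 15}, {T: 25}}
	fmt.Println(keepAtOrAfter(es, 10)) // [{15} {25}]
}
```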

View File

@ -177,6 +177,11 @@ func TestCheckpoint(t *testing.T) {
}, nil) }, nil)
require.NoError(t, w.Log(b)) require.NoError(t, w.Log(b))
b = enc.Exemplars([]record.RefExemplar{
{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", i))},
}, nil)
require.NoError(t, w.Log(b))
last += 100 last += 100
} }
require.NoError(t, w.Close()) require.NoError(t, w.Close())
@ -215,6 +220,12 @@ func TestCheckpoint(t *testing.T) {
for _, s := range samples { for _, s := range samples {
require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp") require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
} }
case record.Exemplars:
exemplars, err := dec.Exemplars(rec, nil)
require.NoError(t, err)
for _, e := range exemplars {
require.GreaterOrEqual(t, e.T, last/2, "exemplar with wrong timestamp")
}
} }
} }
require.NoError(t, r.Err()) require.NoError(t, r.Err())

View File

@ -616,7 +616,10 @@ func (w *WAL) log(rec []byte, final bool) error {
// Compress the record before calculating if a new segment is needed. // Compress the record before calculating if a new segment is needed.
compressed := false compressed := false
if w.compress && len(rec) > 0 { if w.compress &&
len(rec) > 0 &&
// If MaxEncodedLen is less than 0 the record is too large to be compressed.
snappy.MaxEncodedLen(len(rec)) >= 0 {
// The snappy library uses `len` to calculate if we need a new buffer. // The snappy library uses `len` to calculate if we need a new buffer.
// In order to allocate as few buffers as possible make the length // In order to allocate as few buffers as possible make the length
// equal to the capacity. // equal to the capacity.
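A standalone sketch of this guard against oversized records, using the `github.com/golang/snappy` API: `MaxEncodedLen` returns a negative value when the source is too large for snappy's block format, so it doubles as an overflow check before compressing.
```
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// maybeCompress compresses rec only when snappy can represent it.
func maybeCompress(rec []byte) ([]byte, bool) {
	if len(rec) == 0 || snappy.MaxEncodedLen(len(rec)) < 0 {
		return rec, false // empty or too large: keep uncompressed
	}
	return snappy.Encode(nil, rec), true
}

func main() {
	out, ok := maybeCompress([]byte("some WAL record bytes"))
	fmt.Println(len(out), ok)
}
```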

View File

@ -46,6 +46,7 @@ const (
// and it is left to the implementer to make sure they are safe. // and it is left to the implementer to make sure they are safe.
type WriteTo interface { type WriteTo interface {
Append([]record.RefSample) bool Append([]record.RefSample) bool
AppendExemplars([]record.RefExemplar) bool
StoreSeries([]record.RefSeries, int) StoreSeries([]record.RefSeries, int)
// SeriesReset is called after reading a checkpoint to allow the deletion // SeriesReset is called after reading a checkpoint to allow the deletion
// of all series created in a segment lower than the argument. // of all series created in a segment lower than the argument.
@ -66,6 +67,7 @@ type Watcher struct {
logger log.Logger logger log.Logger
walDir string walDir string
lastCheckpoint string lastCheckpoint string
sendExemplars bool
metrics *WatcherMetrics metrics *WatcherMetrics
readerMetrics *LiveReaderMetrics readerMetrics *LiveReaderMetrics
@ -136,7 +138,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
} }
// NewWatcher creates a new WAL watcher for a given WriteTo. // NewWatcher creates a new WAL watcher for a given WriteTo.
func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string) *Watcher { func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string, sendExemplars bool) *Watcher {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
@ -147,6 +149,8 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
readerMetrics: readerMetrics, readerMetrics: readerMetrics,
walDir: path.Join(walDir, "wal"), walDir: path.Join(walDir, "wal"),
name: name, name: name,
sendExemplars: sendExemplars,
quit: make(chan struct{}), quit: make(chan struct{}),
done: make(chan struct{}), done: make(chan struct{}),
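Usage sketch, mirroring the tests below (the helper and its wiring are assumptions, not part of this change): the new trailing boolean opts the watcher in to forwarding exemplar records to its `WriteTo`.
```
package walwatch

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/tsdb/wal"
)

// startWatcher wires up a watcher with exemplar forwarding enabled
// (the final NewWatcher argument).
func startWatcher(writer wal.WriteTo, walDir string) *wal.Watcher {
	metrics := wal.NewWatcherMetrics(nil)
	readerMetrics := wal.NewLiveReaderMetrics(nil)
	w := wal.NewWatcher(metrics, readerMetrics, log.NewNopLogger(), "remote-1", writer, walDir, true)
	go w.Start()
	return w
}
```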
@ -466,6 +470,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
series []record.RefSeries series []record.RefSeries
samples []record.RefSample samples []record.RefSample
send []record.RefSample send []record.RefSample
exemplars []record.RefExemplar
) )
for r.Next() && !isClosed(w.quit) { for r.Next() && !isClosed(w.quit) {
rec := r.Record() rec := r.Record()
@ -507,6 +512,23 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
send = send[:0] send = send[:0]
} }
case record.Exemplars:
// Skip if experimental "exemplars over remote write" is not enabled.
if !w.sendExemplars {
break
}
// If we're not tailing a segment we can ignore any exemplar records we see.
// This speeds up replay of the WAL significantly.
if !tail {
break
}
exemplars, err := dec.Exemplars(rec, exemplars[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
w.writer.AppendExemplars(exemplars)
case record.Tombstones: case record.Tombstones:
default: default:

View File

@ -50,6 +50,7 @@ func retry(t *testing.T, interval time.Duration, n int, f func() bool) {
type writeToMock struct { type writeToMock struct {
samplesAppended int samplesAppended int
exemplarsAppended int
seriesLock sync.Mutex seriesLock sync.Mutex
seriesSegmentIndexes map[uint64]int seriesSegmentIndexes map[uint64]int
} }
@ -59,6 +60,11 @@ func (wtm *writeToMock) Append(s []record.RefSample) bool {
return true return true
} }
func (wtm *writeToMock) AppendExemplars(e []record.RefExemplar) bool {
wtm.exemplarsAppended += len(e)
return true
}
func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) { func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) {
wtm.seriesLock.Lock() wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock() defer wtm.seriesLock.Unlock()
@ -95,6 +101,7 @@ func TestTailSamples(t *testing.T) {
pageSize := 32 * 1024 pageSize := 32 * 1024
const seriesCount = 10 const seriesCount = 10
const samplesCount = 250 const samplesCount = 250
const exemplarsCount = 25
for _, compress := range []bool{false, true} { for _, compress := range []bool{false, true} {
t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) { t.Run(fmt.Sprintf("compress=%t", compress), func(t *testing.T) {
now := time.Now() now := time.Now()
@ -138,6 +145,19 @@ func TestTailSamples(t *testing.T) {
}, nil) }, nil)
require.NoError(t, w.Log(sample)) require.NoError(t, w.Log(sample))
} }
for j := 0; j < exemplarsCount; j++ {
inner := rand.Intn(ref + 1)
exemplar := enc.Exemplars([]record.RefExemplar{
{
Ref: uint64(inner),
T: now.UnixNano() + 1,
V: float64(i),
Labels: labels.FromStrings("traceID", fmt.Sprintf("trace-%d", inner)),
},
}, nil)
require.NoError(t, w.Log(exemplar))
}
} }
// Start read after checkpoint, no more data written. // Start read after checkpoint, no more data written.
@ -145,7 +165,7 @@ func TestTailSamples(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
wt := newWriteToMock() wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true)
watcher.SetStartTime(now) watcher.SetStartTime(now)
// Set the Watcher's metrics so they're not nil pointers. // Set the Watcher's metrics so they're not nil pointers.
@ -162,11 +182,13 @@ func TestTailSamples(t *testing.T) {
expectedSeries := seriesCount expectedSeries := seriesCount
expectedSamples := seriesCount * samplesCount expectedSamples := seriesCount * samplesCount
expectedExemplars := seriesCount * exemplarsCount
retry(t, defaultRetryInterval, defaultRetries, func() bool { retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumLabels() >= expectedSeries return wt.checkNumLabels() >= expectedSeries
}) })
require.Equal(t, expectedSeries, wt.checkNumLabels()) require.Equal(t, expectedSeries, wt.checkNumLabels(), "did not receive the expected number of series")
require.Equal(t, expectedSamples, wt.samplesAppended) require.Equal(t, expectedSamples, wt.samplesAppended, "did not receive the expected number of samples")
require.Equal(t, expectedExemplars, wt.exemplarsAppended, "did not receive the expected number of exemplars")
}) })
} }
} }
@ -229,7 +251,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
wt := newWriteToMock() wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false)
go watcher.Start() go watcher.Start()
expected := seriesCount expected := seriesCount
@ -322,7 +344,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
_, _, err = Segments(w.Dir()) _, _, err = Segments(w.Dir())
require.NoError(t, err) require.NoError(t, err)
wt := newWriteToMock() wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false)
go watcher.Start() go watcher.Start()
expected := seriesCount * 2 expected := seriesCount * 2
@ -392,7 +414,7 @@ func TestReadCheckpoint(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
wt := newWriteToMock() wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false)
go watcher.Start() go watcher.Start()
expectedSeries := seriesCount expectedSeries := seriesCount
@ -465,7 +487,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
} }
wt := newWriteToMock() wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false)
watcher.MaxSegment = -1 watcher.MaxSegment = -1
// Set the Watcher's metrics so they're not nil pointers. // Set the Watcher's metrics so they're not nil pointers.
@ -541,7 +563,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
wt := newWriteToMock() wt := newWriteToMock()
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false)
watcher.MaxSegment = -1 watcher.MaxSegment = -1
go watcher.Start() go watcher.Start()

View File

@ -21,7 +21,7 @@
"@fortawesome/react-fontawesome": "^0.1.4", "@fortawesome/react-fontawesome": "^0.1.4",
"@reach/router": "^1.2.1", "@reach/router": "^1.2.1",
"bootstrap": "^4.6.0", "bootstrap": "^4.6.0",
"codemirror-promql": "^0.15.0", "codemirror-promql": "^0.16.0",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^3.4.8", "downshift": "^3.4.8",
"enzyme-to-json": "^3.4.3", "enzyme-to-json": "^3.4.3",
@ -32,7 +32,6 @@
"jsdom": "^16.4.0", "jsdom": "^16.4.0",
"moment": "^2.24.0", "moment": "^2.24.0",
"moment-timezone": "^0.5.23", "moment-timezone": "^0.5.23",
"sass": "1.32.10",
"popper.js": "^1.14.3", "popper.js": "^1.14.3",
"react": "^16.7.0", "react": "^16.7.0",
"react-copy-to-clipboard": "^5.0.1", "react-copy-to-clipboard": "^5.0.1",
@ -42,6 +41,7 @@
"react-test-renderer": "^16.9.0", "react-test-renderer": "^16.9.0",
"reactstrap": "^8.9.0", "reactstrap": "^8.9.0",
"sanitize-html": "^2.3.3", "sanitize-html": "^2.3.3",
"sass": "1.32.10",
"tempusdominus-bootstrap-4": "^5.1.2", "tempusdominus-bootstrap-4": "^5.1.2",
"tempusdominus-core": "^5.0.3", "tempusdominus-core": "^5.0.3",
"typescript": "^3.3.3", "typescript": "^3.3.3",

File diff suppressed because it is too large Load Diff

View File

@ -354,7 +354,7 @@ func New(logger log.Logger, o *Options) *Handler {
// Redirect the original React UI's path (under "/new") to its new path at the root. // Redirect the original React UI's path (under "/new") to its new path at the root.
router.Get("/new/*path", func(w http.ResponseWriter, r *http.Request) { router.Get("/new/*path", func(w http.ResponseWriter, r *http.Request) {
p := route.Param(r.Context(), "path") p := route.Param(r.Context(), "path")
http.Redirect(w, r, path.Join(o.ExternalURL.Path, strings.TrimPrefix(p, "/new"))+"?"+r.URL.RawQuery, http.StatusFound) http.Redirect(w, r, path.Join(o.ExternalURL.Path, p)+"?"+r.URL.RawQuery, http.StatusFound)
}) })
router.Get("/classic/alerts", readyf(h.alerts)) router.Get("/classic/alerts", readyf(h.alerts))