diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go
index 7544f276a..1d844ddba 100644
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -194,6 +194,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "extra-scrape-metrics":
c.scrape.ExtraMetrics = true
level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
+ case "metadata-wal-records":
+ c.scrape.AppendMetadata = true
+ level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
case "new-service-discovery-manager":
c.enableNewSDManager = true
level.Info(logger).Log("msg", "Experimental service discovery manager")
@@ -322,9 +325,15 @@ func main() {
a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
Default("false").BoolVar(&cfg.web.EnableAdminAPI)
+ // TODO(bwplotka): Consider allowing those remote receive flags to be changed in config.
+ // See https://github.com/prometheus/prometheus/issues/14410
a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)
+ supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
+ a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
+ Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
+
a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
@@ -646,7 +655,7 @@ func main() {
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
- remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
+ remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
@@ -1767,3 +1776,39 @@ type discoveryManager interface {
Run() error
SyncCh() <-chan map[string][]*targetgroup.Group
}
+
+// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
+type rwProtoMsgFlagParser struct {
+ msgs *[]config.RemoteWriteProtoMsg
+}
+
+func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value {
+ return &rwProtoMsgFlagParser{msgs: msgs}
+}
+
+// IsCumulative is used by kingpin to tell if it's an array or not.
+func (p *rwProtoMsgFlagParser) IsCumulative() bool {
+ return true
+}
+
+func (p *rwProtoMsgFlagParser) String() string {
+ ss := make([]string, 0, len(*p.msgs))
+ for _, t := range *p.msgs {
+ ss = append(ss, string(t))
+ }
+ return strings.Join(ss, ",")
+}
+
+func (p *rwProtoMsgFlagParser) Set(opt string) error {
+ t := config.RemoteWriteProtoMsg(opt)
+ if err := t.Validate(); err != nil {
+ return err
+ }
+ for _, prev := range *p.msgs {
+ if prev == t {
+ return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
+ }
+ }
+ *p.msgs = append(*p.msgs, t)
+ return nil
+}
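For orientation, a sketch of an invocation that exercises the pieces added above (flag and feature names are taken from this diff; the combination itself is illustrative):

```
./prometheus \
  --enable-feature=metadata-wal-records \
  --web.enable-remote-write-receiver \
  --web.remote-write-receiver.accepted-protobuf-messages=io.prometheus.write.v2.Request
```

When the new flag is omitted, the receiver accepts both `prometheus.WriteRequest` and `io.prometheus.write.v2.Request`, per the defaults above.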
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 89c171bb5..c827812e6 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -30,11 +30,13 @@ import (
"testing"
"time"
+ "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/rules"
@@ -499,3 +501,65 @@ func TestDocumentation(t *testing.T) {
require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
+
+func TestRwProtoMsgFlagParser(t *testing.T) {
+ defaultOpts := config.RemoteWriteProtoMsgs{
+ config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
+ }
+
+ for _, tcase := range []struct {
+ args []string
+ expected []config.RemoteWriteProtoMsg
+ expectedErr error
+ }{
+ {
+ args: nil,
+ expected: defaultOpts,
+ },
+ {
+ args: []string{"--test-proto-msgs", "test"},
+ expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"),
+ },
+ {
+ args: []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"},
+ expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2},
+ },
+ {
+ args: []string{
+ "--test-proto-msgs", "io.prometheus.write.v2.Request",
+ "--test-proto-msgs", "io.prometheus.write.v2.Request",
+ },
+ expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"),
+ },
+ {
+ args: []string{
+ "--test-proto-msgs", "io.prometheus.write.v2.Request",
+ "--test-proto-msgs", "prometheus.WriteRequest",
+ },
+ expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1},
+ },
+ {
+ args: []string{
+ "--test-proto-msgs", "io.prometheus.write.v2.Request",
+ "--test-proto-msgs", "prometheus.WriteRequest",
+ "--test-proto-msgs", "io.prometheus.write.v2.Request",
+ },
+ expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"),
+ },
+ } {
+ t.Run(strings.Join(tcase.args, ","), func(t *testing.T) {
+ a := kingpin.New("test", "")
+ var opt []config.RemoteWriteProtoMsg
+ a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt))
+
+ _, err := a.Parse(tcase.args)
+ if tcase.expectedErr != nil {
+ require.Error(t, err)
+ require.Equal(t, tcase.expectedErr, err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tcase.expected, opt)
+ }
+ })
+ }
+}
diff --git a/config/config.go b/config/config.go
index 9defa10d4..c924e3098 100644
--- a/config/config.go
+++ b/config/config.go
@@ -180,6 +180,7 @@ var (
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second),
+ ProtobufMessage: RemoteWriteProtoMsgV1,
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.DefaultHTTPClientConfig,
@@ -279,7 +280,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
- // We do these checks for library users that would not call Validate in
+ // We do these checks for library users that would not call validate in
// Unmarshal.
if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, err
@@ -1055,6 +1056,49 @@ func CheckTargetAddress(address model.LabelValue) error {
return nil
}
+// RemoteWriteProtoMsg represents the known protobuf message for the remote write
+// 1.0 and 2.0 specs.
+type RemoteWriteProtoMsg string
+
+// Validate returns an error if the given reference for the protobuf message is not supported.
+func (s RemoteWriteProtoMsg) Validate() error {
+ switch s {
+ case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
+ return nil
+ default:
+ return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
+ }
+}
+
+// RemoteWriteProtoMsgs is a list of remote write protobuf message types.
+type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
+
+func (m RemoteWriteProtoMsgs) Strings() []string {
+ ret := make([]string, 0, len(m))
+ for _, typ := range m {
+ ret = append(ret, string(typ))
+ }
+ return ret
+}
+
+func (m RemoteWriteProtoMsgs) String() string {
+ return strings.Join(m.Strings(), ", ")
+}
+
+var (
+ // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
+ // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+ //
+ // NOTE: This string is used for both HTTP header values and config values, so don't change
+ // this reference.
+ RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
+ // RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
+ // message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+ //
+ // NOTE: This string is used for both HTTP header values and config values, so don't change
+ // this reference.
+ RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
+)
+
// RemoteWriteConfig is the configuration for writing to remote storage.
type RemoteWriteConfig struct {
URL *config.URL `yaml:"url"`
@@ -1064,6 +1108,9 @@ type RemoteWriteConfig struct {
Name string `yaml:"name,omitempty"`
SendExemplars bool `yaml:"send_exemplars,omitempty"`
SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"`
+ // ProtobufMessage specifies the protobuf message to use against the remote
+ // receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+ ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@@ -1098,6 +1145,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return err
}
+ if err := c.ProtobufMessage.Validate(); err != nil {
+ return fmt.Errorf("invalid protobuf_message value: %w", err)
+ }
+
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
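A minimal sketch of how a library user might exercise the new `RemoteWriteProtoMsg` type added above (names from this diff):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	for _, m := range []config.RemoteWriteProtoMsg{
		config.RemoteWriteProtoMsgV1,            // "prometheus.WriteRequest"
		config.RemoteWriteProtoMsgV2,            // "io.prometheus.write.v2.Request"
		config.RemoteWriteProtoMsg("bogus.Msg"), // unknown; Validate returns an error
	} {
		fmt.Println(m, "->", m.Validate())
	}
}
```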
diff --git a/config/config_test.go b/config/config_test.go
index d84059b48..3c4907a46 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -108,9 +108,10 @@ var expectedConf = &Config{
RemoteWriteConfigs: []*RemoteWriteConfig{
{
- URL: mustParseURL("http://remote1/push"),
- RemoteTimeout: model.Duration(30 * time.Second),
- Name: "drop_expensive",
+ URL: mustParseURL("http://remote1/push"),
+ ProtobufMessage: RemoteWriteProtoMsgV1,
+ RemoteTimeout: model.Duration(30 * time.Second),
+ Name: "drop_expensive",
WriteRelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__name__"},
@@ -137,11 +138,12 @@ var expectedConf = &Config{
},
},
{
- URL: mustParseURL("http://remote2/push"),
- RemoteTimeout: model.Duration(30 * time.Second),
- QueueConfig: DefaultQueueConfig,
- MetadataConfig: DefaultMetadataConfig,
- Name: "rw_tls",
+ URL: mustParseURL("http://remote2/push"),
+ ProtobufMessage: RemoteWriteProtoMsgV2,
+ RemoteTimeout: model.Duration(30 * time.Second),
+ QueueConfig: DefaultQueueConfig,
+ MetadataConfig: DefaultMetadataConfig,
+ Name: "rw_tls",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
@@ -1800,6 +1802,10 @@ var expectedErrors = []struct {
filename: "remote_write_authorization_header.bad.yml",
errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
},
+ {
+ filename: "remote_write_wrong_msg.bad.yml",
+ errMsg: `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`,
+ },
{
filename: "remote_write_url_missing.bad.yml",
errMsg: `url for remote_write is empty`,
diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml
index 184e6363c..0e0aa2bd5 100644
--- a/config/testdata/conf.good.yml
+++ b/config/testdata/conf.good.yml
@@ -37,6 +37,7 @@ remote_write:
key_file: valid_key_file
- url: http://remote2/push
+ protobuf_message: io.prometheus.write.v2.Request
name: rw_tls
tls_config:
cert_file: valid_cert_file
diff --git a/config/testdata/remote_write_wrong_msg.bad.yml b/config/testdata/remote_write_wrong_msg.bad.yml
new file mode 100644
index 000000000..091830954
--- /dev/null
+++ b/config/testdata/remote_write_wrong_msg.bad.yml
@@ -0,0 +1,3 @@
+remote_write:
+ - url: localhost:9090
+ protobuf_message: io.prometheus.writet.v2.Request # typo in 'write"
diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md
index 1fc032d09..223260243 100644
--- a/docs/command-line/prometheus.md
+++ b/docs/command-line/prometheus.md
@@ -26,6 +26,7 @@ The Prometheus monitoring server
| --web.enable-lifecycle | Enable shutdown and reload via HTTP request. | `false` |
| --web.enable-admin-api | Enable API endpoints for admin control actions. | `false` |
| --web.enable-remote-write-receiver | Enable API endpoint accepting remote write requests. | `false` |
+| --web.remote-write-receiver.accepted-protobuf-messages | List of the remote write protobuf messages to accept when receiving remote write requests. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest,io.prometheus.write.v2.Request` |
| --web.console.templates | Path to the console template directory, available at /consoles. | `consoles` |
| --web.console.libraries | Path to the console library directory. | `console_libraries` |
| --web.page-title | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md
index c03ed4971..35976871b 100644
--- a/docs/configuration/configuration.md
+++ b/docs/configuration/configuration.md
@@ -3575,6 +3575,17 @@ this functionality.
# The URL of the endpoint to send samples to.
url:
+# The protobuf message to use when writing to the remote write endpoint.
+#
+# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which
+# will be deprecated eventually.
+# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former,
+# improving efficiency and sending metadata, created timestamps, and native histograms by default.
+#
+# Before changing this value, consult your remote storage provider (or test) to confirm which message it supports.
+# Read more at https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request
+[ protobuf_message: | default = prometheus.WriteRequest ]
+
# Timeout for requests to the remote write endpoint.
[ remote_timeout: | default = 30s ]
@@ -3596,6 +3607,7 @@ write_relabel_configs:
[ send_exemplars: | default = false ]
# Enables sending of native histograms, also known as sparse histograms, over remote write.
+# For the `io.prometheus.write.v2.Request` message, this option is a no-op (always true).
[ send_native_histograms: | default = false ]
# Sets the `Authorization` header on every remote write request with the
@@ -3609,7 +3621,7 @@ basic_auth:
# Optional `Authorization` header configuration.
authorization:
# Sets the authentication type.
- [ type: | default: Bearer ]
+ [ type: | default = Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: ]
@@ -3673,7 +3685,7 @@ tls_config:
# contain port numbers.
[ no_proxy: ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: | default: false ]
+[ proxy_from_environment: | default = false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ : [, ...] ] ]
@@ -3682,7 +3694,7 @@ tls_config:
[ follow_redirects: | default = true ]
# Whether to enable HTTP2.
-[ enable_http2: | default: true ]
+[ enable_http2: | default = true ]
# Configures the queue used to write to remote storage.
queue_config:
@@ -3712,7 +3724,10 @@ queue_config:
# which means that all samples are sent.
[ sample_age_limit: | default = 0s ]
-# Configures the sending of series metadata to remote storage.
+# Configures the sending of series metadata to remote storage
+# if the `prometheus.WriteRequest` message is used. When
+# `io.prometheus.write.v2.Request` is used, metadata is always sent.
+#
# Metadata configuration is subject to change at any point
# or be removed in future releases.
metadata_config:
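Putting the new option together, an example of a remote_write block opting into the 2.0 message (mirroring `config/testdata/conf.good.yml` above; the URL is a placeholder):

```yaml
remote_write:
  - url: http://remote2/push
    # Defaults to prometheus.WriteRequest when omitted.
    protobuf_message: io.prometheus.write.v2.Request
```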
diff --git a/docs/feature_flags.md b/docs/feature_flags.md
index a5dc69a71..24d70647f 100644
--- a/docs/feature_flags.md
+++ b/docs/feature_flags.md
@@ -224,3 +224,13 @@ When the `concurrent-rule-eval` feature flag is enabled, rules without any depen
This has the potential to improve rule group evaluation latency and resource utilization at the expense of adding more concurrent query load.
The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default.
+
+## Metadata WAL Records
+
+`--enable-feature=metadata-wal-records`
+
+When enabled, Prometheus will store metadata in memory and keep track of
+metadata changes as WAL records on a per-series basis.
+
+This must be used if you are also using remote write 2.0, as it will only
+gather metadata from the WAL.
diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md
index 9748c448d..739cf3be3 100644
--- a/documentation/examples/remote_storage/example_write_adapter/README.md
+++ b/documentation/examples/remote_storage/example_write_adapter/README.md
@@ -7,6 +7,7 @@ To use it:
```
go build
+
./example_write_adapter
```
@@ -15,10 +16,19 @@ go build
```yaml
remote_write:
- url: "http://localhost:1234/receive"
+ protobuf_message: "io.prometheus.write.v2.Request"
+```
+
+or, for the deprecated Remote Write 1.0 message:
+
+```yaml
+remote_write:
+ - url: "http://localhost:1234/receive"
+ protobuf_message: "prometheus.WriteRequest"
```
-Then start Prometheus:
+Then start Prometheus (in a separate terminal):
```
-./prometheus
+./prometheus --enable-feature=metadata-wal-records
```
diff --git a/documentation/examples/remote_storage/example_write_adapter/server.go b/documentation/examples/remote_storage/example_write_adapter/server.go
index 48c0a9571..727a3056d 100644
--- a/documentation/examples/remote_storage/example_write_adapter/server.go
+++ b/documentation/examples/remote_storage/example_write_adapter/server.go
@@ -18,44 +18,103 @@ import (
"log"
"net/http"
- "github.com/prometheus/common/model"
-
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage/remote"
)
func main() {
http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
- req, err := remote.DecodeWriteRequest(r.Body)
- if err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
+ enc := r.Header.Get("Content-Encoding")
+ if enc == "" {
+ http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
+ return
+ }
+ if enc != "snappy" {
+ http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
return
}
- for _, ts := range req.Timeseries {
- m := make(model.Metric, len(ts.Labels))
- for _, l := range ts.Labels {
- m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
- fmt.Println(m)
+ contentType := r.Header.Get("Content-Type")
+ if contentType == "" {
+ http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
+ }
- for _, s := range ts.Samples {
- fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
- }
+ defer func() { _ = r.Body.Close() }()
- for _, e := range ts.Exemplars {
- m := make(model.Metric, len(e.Labels))
- for _, l := range e.Labels {
- m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
- fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
+ // Very simplistic content parsing, see
+ // storage/remote/write_handler.go#WriteHandler.ServeHTTP for a production example.
+ switch contentType {
+ case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest":
+ req, err := remote.DecodeWriteRequest(r.Body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
}
+ printV1(req)
+ case "application/x-protobuf;proto=io.prometheus.write.v2.Request":
+ req, err := remote.DecodeWriteV2Request(r.Body)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ printV2(req)
+ default:
+ msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
+ fmt.Println(msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ }
+ })
+ log.Fatal(http.ListenAndServe(":1234", nil))
+}
+
+func printV1(req *prompb.WriteRequest) {
+ b := labels.NewScratchBuilder(0)
+ for _, ts := range req.Timeseries {
+ fmt.Println(ts.ToLabels(&b, nil))
- for _, hp := range ts.Histograms {
- h := remote.HistogramProtoToHistogram(hp)
+ for _, s := range ts.Samples {
+ fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
+ }
+ for _, ep := range ts.Exemplars {
+ e := ep.ToExemplar(&b, nil)
+ fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
+ }
+ for _, hp := range ts.Histograms {
+ if hp.IsFloatHistogram() {
+ h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
+ continue
}
+ h := hp.ToIntHistogram()
+ fmt.Printf("\tHistogram: %s\n", h.String())
}
- })
+ }
+}
- log.Fatal(http.ListenAndServe(":1234", nil))
+func printV2(req *writev2.Request) {
+ b := labels.NewScratchBuilder(0)
+ for _, ts := range req.Timeseries {
+ l := ts.ToLabels(&b, req.Symbols)
+ m := ts.ToMetadata(req.Symbols)
+ fmt.Println(l, m)
+
+ for _, s := range ts.Samples {
+ fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
+ }
+ for _, ep := range ts.Exemplars {
+ e := ep.ToExemplar(&b, req.Symbols)
+ fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
+ }
+ for _, hp := range ts.Histograms {
+ if hp.IsFloatHistogram() {
+ h := hp.ToFloatHistogram()
+ fmt.Printf("\tHistogram: %s\n", h.String())
+ continue
+ }
+ h := hp.ToIntHistogram()
+ fmt.Printf("\tHistogram: %s\n", h.String())
+ }
+ }
}
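For completeness, a hand-rolled client sketch that sends one series to the adapter above (the symbols layout, with the empty string at index 0, follows the Remote Write 2.0 spec; in practice Prometheus's remote write queue does all of this for you):

```go
package main

import (
	"bytes"
	"log"
	"net/http"
	"time"

	"github.com/golang/snappy"

	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	// Symbols table: per the Remote Write 2.0 spec, index 0 is the empty
	// string; labels reference name/value pairs by index.
	req := &writev2.Request{
		Symbols: []string{"", "__name__", "test_metric_total", "job", "example"},
		Timeseries: []writev2.TimeSeries{{
			LabelsRefs: []uint32{1, 2, 3, 4}, // __name__="test_metric_total", job="example"
			Samples:    []writev2.Sample{{Value: 1, Timestamp: time.Now().UnixMilli()}},
		}},
	}

	data, err := req.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	httpReq, err := http.NewRequest(http.MethodPost, "http://localhost:1234/receive",
		bytes.NewReader(snappy.Encode(nil, data)))
	if err != nil {
		log.Fatal(err)
	}
	// The headers the handler above inspects.
	httpReq.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
	httpReq.Header.Set("Content-Encoding", "snappy")

	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```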
diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod
index d4f19749d..2491bbe2d 100644
--- a/documentation/examples/remote_storage/go.mod
+++ b/documentation/examples/remote_storage/go.mod
@@ -17,10 +17,10 @@ require (
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
- github.com/aws/aws-sdk-go v1.51.25 // indirect
+ github.com/aws/aws-sdk-go v1.53.16 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -31,8 +31,7 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -49,13 +48,12 @@ require (
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.5.0 // indirect
- go.opentelemetry.io/collector/pdata v1.5.0 // indirect
- go.opentelemetry.io/collector/semconv v0.98.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
- go.opentelemetry.io/otel v1.25.0 // indirect
- go.opentelemetry.io/otel/metric v1.25.0 // indirect
- go.opentelemetry.io/otel/trace v1.25.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.8.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.101.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+ go.opentelemetry.io/otel v1.27.0 // indirect
+ go.opentelemetry.io/otel/metric v1.27.0 // indirect
+ go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
@@ -64,8 +62,8 @@ require (
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
- google.golang.org/grpc v1.63.2 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+ google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -82,4 +80,10 @@ exclude (
cloud.google.com/go v0.34.0
cloud.google.com/go v0.65.0
cloud.google.com/go v0.82.0
+
+ // Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules.
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
)
+
+// TODO(bwplotka): Move to a main branch commit or perhaps a released version.
+replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c
diff --git a/documentation/examples/remote_storage/go.sum b/documentation/examples/remote_storage/go.sum
index ec0434711..9898d75d7 100644
--- a/documentation/examples/remote_storage/go.sum
+++ b/documentation/examples/remote_storage/go.sum
@@ -2,10 +2,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
@@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
-github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
+github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -37,8 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
+github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
-github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
+github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
+github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
-github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -68,8 +68,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -95,8 +95,8 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
-github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
-github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
+github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
+github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -135,40 +135,38 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
-github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
+github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
+github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
-github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
-github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
-github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
+github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
+github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
+github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
-github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
-github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
+github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
-github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
+github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
@@ -208,14 +206,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
-github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
+github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
+github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
@@ -243,8 +241,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
-github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
+github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
+github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -279,12 +277,12 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
-github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
+github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
+github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -306,20 +304,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
-go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
-go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
-go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
-go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
-go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
-go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
-go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
-go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
-go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
-go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
-go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
+go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
+go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
+go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
+go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -336,8 +332,8 @@ golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRj
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -397,21 +393,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
+google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/prompb/codec.go b/prompb/codec.go
new file mode 100644
index 000000000..ad30cd5e7
--- /dev/null
+++ b/prompb/codec.go
@@ -0,0 +1,201 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prompb
+
+import (
+ "strings"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
+
+// ToLabels returns model labels.Labels from the timeseries' remote labels.
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+ return labelProtosToLabels(b, m.GetLabels())
+}
+
+// ToLabels returns model labels.Labels from the chunked series' remote labels.
+func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+ return labelProtosToLabels(b, m.GetLabels())
+}
+
+func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
+ b.Reset()
+ for _, l := range labelPairs {
+ b.Add(l.Name, l.Value)
+ }
+ b.Sort()
+ return b.Labels()
+}
+
+// FromLabels transforms labels into prompb labels. The buffer slice
+// will be used to avoid allocations if it is big enough to store the labels.
+func FromLabels(lbls labels.Labels, buf []Label) []Label {
+ result := buf[:0]
+ lbls.Range(func(l labels.Label) {
+ result = append(result, Label{
+ Name: l.Name,
+ Value: l.Value,
+ })
+ })
+ return result
+}
+
+// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string, we need to transform it to an enum.
+func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
+ mt := strings.ToUpper(string(t))
+ v, ok := MetricMetadata_MetricType_value[mt]
+ if !ok {
+ return MetricMetadata_UNKNOWN
+ }
+ return MetricMetadata_MetricType(v)
+}
+
+// IsFloatHistogram returns true if the histogram is a float histogram.
+func (h Histogram) IsFloatHistogram() bool {
+ _, ok := h.GetCount().(*Histogram_CountFloat)
+ return ok
+}
+
+// ToIntHistogram returns an integer Prometheus histogram from the remote implementation
+// of the integer histogram. If it's a float histogram, the method returns nil.
+func (h Histogram) ToIntHistogram() *histogram.Histogram {
+ if h.IsFloatHistogram() {
+ return nil
+ }
+ return &histogram.Histogram{
+ CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: h.GetZeroCountInt(),
+ Count: h.GetCountInt(),
+ Sum: h.Sum,
+ PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
+ PositiveBuckets: h.GetPositiveDeltas(),
+ NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
+ NegativeBuckets: h.GetNegativeDeltas(),
+ }
+}
+
+// ToFloatHistogram returns a float Prometheus histogram from the remote implementation
+// of the float histogram. If the underlying implementation is an integer histogram, a
+// conversion is performed.
+func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
+ if h.IsFloatHistogram() {
+ return &histogram.FloatHistogram{
+ CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: h.GetZeroCountFloat(),
+ Count: h.GetCountFloat(),
+ Sum: h.Sum,
+ PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
+ PositiveBuckets: h.GetPositiveCounts(),
+ NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
+ NegativeBuckets: h.GetNegativeCounts(),
+ }
+ }
+ // Conversion from integer histogram.
+ return &histogram.FloatHistogram{
+ CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: float64(h.GetZeroCountInt()),
+ Count: float64(h.GetCountInt()),
+ Sum: h.Sum,
+ PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
+ PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
+ NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
+ NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
+ }
+}
+
+func spansProtoToSpans(s []BucketSpan) []histogram.Span {
+ spans := make([]histogram.Span, len(s))
+ for i := 0; i < len(s); i++ {
+ spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
+ }
+
+ return spans
+}
+
+func deltasToCounts(deltas []int64) []float64 {
+ counts := make([]float64, len(deltas))
+ var cur float64
+ for i, d := range deltas {
+ cur += float64(d)
+ counts[i] = cur
+ }
+ return counts
+}
+
+// FromIntHistogram returns remote Histogram from the integer Histogram.
+func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
+ return Histogram{
+ Count: &Histogram_CountInt{CountInt: h.Count},
+ Sum: h.Sum,
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
+ NegativeSpans: spansToSpansProto(h.NegativeSpans),
+ NegativeDeltas: h.NegativeBuckets,
+ PositiveSpans: spansToSpansProto(h.PositiveSpans),
+ PositiveDeltas: h.PositiveBuckets,
+ ResetHint: Histogram_ResetHint(h.CounterResetHint),
+ Timestamp: timestamp,
+ }
+}
+
+// FromFloatHistogram returns remote Histogram from the float Histogram.
+func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
+ return Histogram{
+ Count: &Histogram_CountFloat{CountFloat: fh.Count},
+ Sum: fh.Sum,
+ Schema: fh.Schema,
+ ZeroThreshold: fh.ZeroThreshold,
+ ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
+ NegativeSpans: spansToSpansProto(fh.NegativeSpans),
+ NegativeCounts: fh.NegativeBuckets,
+ PositiveSpans: spansToSpansProto(fh.PositiveSpans),
+ PositiveCounts: fh.PositiveBuckets,
+ ResetHint: Histogram_ResetHint(fh.CounterResetHint),
+ Timestamp: timestamp,
+ }
+}
+
+func spansToSpansProto(s []histogram.Span) []BucketSpan {
+ spans := make([]BucketSpan, len(s))
+ for i := 0; i < len(s); i++ {
+ spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
+ }
+
+ return spans
+}
+
+// ToExemplar converts remote exemplar to model exemplar.
+func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar {
+ timestamp := m.Timestamp
+
+ return exemplar.Exemplar{
+ Labels: labelProtosToLabels(b, m.GetLabels()),
+ Value: m.Value,
+ Ts: timestamp,
+ HasTs: timestamp != 0,
+ }
+}
diff --git a/prompb/custom.go b/prompb/custom.go
index 13d6e0f0c..f73ddd446 100644
--- a/prompb/custom.go
+++ b/prompb/custom.go
@@ -17,14 +17,6 @@ import (
"sync"
)
-func (m Sample) T() int64 { return m.Timestamp }
-func (m Sample) V() float64 { return m.Value }
-
-func (h Histogram) IsFloatHistogram() bool {
- _, ok := h.GetCount().(*Histogram_CountFloat)
- return ok
-}
-
func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
size := r.Size()
data, ok := p.Get().(*[]byte)
diff --git a/prompb/io/prometheus/write/v2/codec.go b/prompb/io/prometheus/write/v2/codec.go
new file mode 100644
index 000000000..2939941a8
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/codec.go
@@ -0,0 +1,213 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/prometheus/model/exemplar"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+)
+
+// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
+
+// ToLabels returns model labels.Labels from the timeseries' remote labels.
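+// For example (an illustrative sketch): given symbols ["", "__name__", "up"]
+// and label references [1, 2], ToLabels yields {__name__="up"}.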
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+ return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
+}
+
+// ToMetadata returns model metadata from the timeseries' remote metadata.
+func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata {
+ typ := model.MetricTypeUnknown
+ switch m.Metadata.Type {
+ case Metadata_METRIC_TYPE_COUNTER:
+ typ = model.MetricTypeCounter
+ case Metadata_METRIC_TYPE_GAUGE:
+ typ = model.MetricTypeGauge
+ case Metadata_METRIC_TYPE_HISTOGRAM:
+ typ = model.MetricTypeHistogram
+ case Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
+ typ = model.MetricTypeGaugeHistogram
+ case Metadata_METRIC_TYPE_SUMMARY:
+ typ = model.MetricTypeSummary
+ case Metadata_METRIC_TYPE_INFO:
+ typ = model.MetricTypeInfo
+ case Metadata_METRIC_TYPE_STATESET:
+ typ = model.MetricTypeStateset
+ }
+ return metadata.Metadata{
+ Type: typ,
+ Unit: symbols[m.Metadata.UnitRef],
+ Help: symbols[m.Metadata.HelpRef],
+ }
+}
+
+// FromMetadataType transforms a Prometheus metricType into a writev2 metricType.
+// Since the former is a string, we need to transform it to an enum.
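+// For example, model.MetricTypeCounter maps to Metadata_METRIC_TYPE_COUNTER;
+// anything unrecognized maps to Metadata_METRIC_TYPE_UNSPECIFIED.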
+func FromMetadataType(t model.MetricType) Metadata_MetricType {
+ switch t {
+ case model.MetricTypeCounter:
+ return Metadata_METRIC_TYPE_COUNTER
+ case model.MetricTypeGauge:
+ return Metadata_METRIC_TYPE_GAUGE
+ case model.MetricTypeHistogram:
+ return Metadata_METRIC_TYPE_HISTOGRAM
+ case model.MetricTypeGaugeHistogram:
+ return Metadata_METRIC_TYPE_GAUGEHISTOGRAM
+ case model.MetricTypeSummary:
+ return Metadata_METRIC_TYPE_SUMMARY
+ case model.MetricTypeInfo:
+ return Metadata_METRIC_TYPE_INFO
+ case model.MetricTypeStateset:
+ return Metadata_METRIC_TYPE_STATESET
+ default:
+ return Metadata_METRIC_TYPE_UNSPECIFIED
+ }
+}
+
+// IsFloatHistogram returns true if the histogram is a float histogram.
+func (h Histogram) IsFloatHistogram() bool {
+ _, ok := h.GetCount().(*Histogram_CountFloat)
+ return ok
+}
+
+// ToIntHistogram returns an integer Prometheus histogram from the remote
+// implementation of an integer histogram. If it's a float histogram, the
+// method returns nil.
+// TODO(bwplotka): Add support for incoming NHCB.
+func (h Histogram) ToIntHistogram() *histogram.Histogram {
+ if h.IsFloatHistogram() {
+ return nil
+ }
+ return &histogram.Histogram{
+ CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: h.GetZeroCountInt(),
+ Count: h.GetCountInt(),
+ Sum: h.Sum,
+ PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
+ PositiveBuckets: h.GetPositiveDeltas(),
+ NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
+ NegativeBuckets: h.GetNegativeDeltas(),
+ }
+}
+
+// ToFloatHistogram returns a float Prometheus histogram from the remote
+// implementation of a float histogram. If the underlying implementation is an
+// integer histogram, a conversion is performed.
+// TODO(bwplotka): Add support for incoming NHCB.
+func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
+ if h.IsFloatHistogram() {
+ return &histogram.FloatHistogram{
+ CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: h.GetZeroCountFloat(),
+ Count: h.GetCountFloat(),
+ Sum: h.Sum,
+ PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
+ PositiveBuckets: h.GetPositiveCounts(),
+ NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
+ NegativeBuckets: h.GetNegativeCounts(),
+ }
+ }
+ // Conversion from integer histogram.
+ return &histogram.FloatHistogram{
+ CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: float64(h.GetZeroCountInt()),
+ Count: float64(h.GetCountInt()),
+ Sum: h.Sum,
+ PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
+ PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
+ NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
+ NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
+ }
+}
+
+func spansProtoToSpans(s []BucketSpan) []histogram.Span {
+ spans := make([]histogram.Span, len(s))
+ for i := 0; i < len(s); i++ {
+ spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
+ }
+
+ return spans
+}
+
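+// deltasToCounts converts integer histogram bucket deltas into absolute float
+// counts; e.g. deltas [2, 1, -1] accumulate to counts [2, 3, 2].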
+func deltasToCounts(deltas []int64) []float64 {
+ counts := make([]float64, len(deltas))
+ var cur float64
+ for i, d := range deltas {
+ cur += float64(d)
+ counts[i] = cur
+ }
+ return counts
+}
+
+// FromIntHistogram returns a remote Histogram from the integer Histogram.
+func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
+ return Histogram{
+ Count: &Histogram_CountInt{CountInt: h.Count},
+ Sum: h.Sum,
+ Schema: h.Schema,
+ ZeroThreshold: h.ZeroThreshold,
+ ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
+ NegativeSpans: spansToSpansProto(h.NegativeSpans),
+ NegativeDeltas: h.NegativeBuckets,
+ PositiveSpans: spansToSpansProto(h.PositiveSpans),
+ PositiveDeltas: h.PositiveBuckets,
+ ResetHint: Histogram_ResetHint(h.CounterResetHint),
+ Timestamp: timestamp,
+ }
+}
+
+// FromFloatHistogram returns a remote Histogram from the float Histogram.
+func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
+ return Histogram{
+ Count: &Histogram_CountFloat{CountFloat: fh.Count},
+ Sum: fh.Sum,
+ Schema: fh.Schema,
+ ZeroThreshold: fh.ZeroThreshold,
+ ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
+ NegativeSpans: spansToSpansProto(fh.NegativeSpans),
+ NegativeCounts: fh.NegativeBuckets,
+ PositiveSpans: spansToSpansProto(fh.PositiveSpans),
+ PositiveCounts: fh.PositiveBuckets,
+ ResetHint: Histogram_ResetHint(fh.CounterResetHint),
+ Timestamp: timestamp,
+ }
+}
+
+func spansToSpansProto(s []histogram.Span) []BucketSpan {
+ spans := make([]BucketSpan, len(s))
+ for i := 0; i < len(s); i++ {
+ spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
+ }
+
+ return spans
+}
+
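+// ToExemplar converts remote exemplar to model exemplar, resolving label
+// references against the given symbols.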
+func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar {
+ timestamp := m.Timestamp
+
+ return exemplar.Exemplar{
+ Labels: desymbolizeLabels(b, m.LabelsRefs, symbols),
+ Value: m.Value,
+ Ts: timestamp,
+ HasTs: timestamp != 0,
+ }
+}
diff --git a/prompb/io/prometheus/write/v2/custom.go b/prompb/io/prometheus/write/v2/custom.go
new file mode 100644
index 000000000..3aa778eb6
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/custom.go
@@ -0,0 +1,165 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+ "slices"
+)
+
+func (m Sample) T() int64 { return m.Timestamp }
+func (m Sample) V() float64 { return m.Value }
+
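+// OptimizedMarshal marshals m into dst if dst's capacity is sufficient,
+// allocating a new buffer otherwise. The returned slice can be fed back in on
+// the next call to amortize allocations, e.g.:
+//
+//	buf, err = req.OptimizedMarshal(buf)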
+func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) {
+ siz := m.Size()
+ if cap(dst) < siz {
+ dst = make([]byte, siz)
+ }
+ n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz])
+ if err != nil {
+ return nil, err
+ }
+ return dst[:n], nil
+}
+
+// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
+// but calls OptimizedMarshalToSizedBuffer on the timeseries.
+func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Timeseries) > 0 {
+ for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Symbols) > 0 {
+ for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Symbols[iNdEx])
+ copy(dAtA[i:], m.Symbols[iNdEx])
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
+// but marshals m.LabelsRefs in place without extra allocations.
+func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.CreatedTimestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
+ i--
+ dAtA[i] = 0x30
+ }
+ {
+ size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Histograms) > 0 {
+ for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Exemplars) > 0 {
+ for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Samples) > 0 {
+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+
+ if len(m.LabelsRefs) > 0 {
+ // This is the trick: encode the varints in reverse order to make it easier
+ // to do it in place. Then reverse the whole thing.
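+		// For example, refs [1, 300] land in the buffer tail as 0x02, 0xAC, 0x01;
+		// the final reverse restores the wire order 0x01, 0xAC, 0x02.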
+ var j10 int
+ start := i
+ for _, num := range m.LabelsRefs {
+ for num >= 1<<7 {
+ dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ i--
+ j10++
+ }
+ dAtA[i-1] = uint8(num)
+ i--
+ j10++
+ }
+ slices.Reverse(dAtA[i:start])
+ // --- end of trick
+
+ i = encodeVarintTypes(dAtA, i, uint64(j10))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
diff --git a/prompb/io/prometheus/write/v2/custom_test.go b/prompb/io/prometheus/write/v2/custom_test.go
new file mode 100644
index 000000000..139cbfb22
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/custom_test.go
@@ -0,0 +1,97 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestOptimizedMarshal(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ m *Request
+ }{
+ {
+ name: "empty",
+ m: &Request{},
+ },
+ {
+ name: "simple",
+ m: &Request{
+ Timeseries: []TimeSeries{
+ {
+ LabelsRefs: []uint32{
+ 0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11,
+ 12, 13,
+ 14, 15,
+ },
+
+ Samples: []Sample{{Value: 1, Timestamp: 0}},
+ Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}},
+ Histograms: nil,
+ },
+ {
+ LabelsRefs: []uint32{
+ 0, 1,
+ 2, 3,
+ 4, 5,
+ 6, 7,
+ 8, 9,
+ 10, 11,
+ 12, 13,
+ 14, 15,
+ },
+ Samples: []Sample{{Value: 2, Timestamp: 1}},
+ Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}},
+ Histograms: nil,
+ },
+ },
+ Symbols: []string{
+ "a", "b",
+ "c", "d",
+ "e", "f",
+ "g", "h",
+ "i", "j",
+ "k", "l",
+ "m", "n",
+ "o", "p",
+ },
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ // Keep the slice allocated to mimic what std Marshal
+ // would give to sized Marshal.
+ got := make([]byte, 0)
+
+ // Should be the same as the standard marshal.
+ expected, err := tt.m.Marshal()
+ require.NoError(t, err)
+ got, err = tt.m.OptimizedMarshal(got)
+ require.NoError(t, err)
+ require.Equal(t, expected, got)
+
+ // Unmarshal should work too.
+ m := &Request{}
+ require.NoError(t, m.Unmarshal(got))
+ require.Equal(t, tt.m, m)
+ })
+ }
+}
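+
+func TestOptimizedMarshalBufferReuse(t *testing.T) {
+	// A sketch of the intended amortized use: the buffer returned by
+	// OptimizedMarshal can be passed back in, so steady-state marshalling
+	// avoids re-allocating as long as requests fit in its capacity.
+	m := &Request{Symbols: []string{"", "abc"}}
+
+	buf, err := m.OptimizedMarshal(nil)
+	require.NoError(t, err)
+
+	// The second call reuses buf and must still match the standard marshal.
+	got, err := m.OptimizedMarshal(buf)
+	require.NoError(t, err)
+
+	expected, err := m.Marshal()
+	require.NoError(t, err)
+	require.Equal(t, expected, got)
+}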
diff --git a/prompb/io/prometheus/write/v2/symbols.go b/prompb/io/prometheus/write/v2/symbols.go
new file mode 100644
index 000000000..f316a976f
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/symbols.go
@@ -0,0 +1,83 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import "github.com/prometheus/prometheus/model/labels"
+
+// SymbolsTable implements a table for easy symbol use.
+type SymbolsTable struct {
+ strings []string
+ symbolsMap map[string]uint32
+}
+
+// NewSymbolTable returns a symbol table.
+func NewSymbolTable() SymbolsTable {
+ return SymbolsTable{
+ // Empty string is required as a first element.
+ symbolsMap: map[string]uint32{"": 0},
+ strings: []string{""},
+ }
+}
+
+// Symbolize adds a string to the symbols table (if not already present)
+// and returns its reference number.
+func (t *SymbolsTable) Symbolize(str string) uint32 {
+ if ref, ok := t.symbolsMap[str]; ok {
+ return ref
+ }
+ ref := uint32(len(t.strings))
+ t.strings = append(t.strings, str)
+ t.symbolsMap[str] = ref
+ return ref
+}
+
+// SymbolizeLabels symbolizes Prometheus labels, reusing buf when possible.
+func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
+ result := buf[:0]
+ lbls.Range(func(l labels.Label) {
+ off := t.Symbolize(l.Name)
+ result = append(result, off)
+ off = t.Symbolize(l.Value)
+ result = append(result, off)
+ })
+ return result
+}
+
+// Symbols returns the computed symbols table to put in e.g. Request.Symbols.
+// As per the spec, order does not matter.
+func (t *SymbolsTable) Symbols() []string {
+ return t.strings
+}
+
+// Reset clears the symbols table.
+func (t *SymbolsTable) Reset() {
+ // NOTE: Make sure to keep empty symbol.
+ t.strings = t.strings[:1]
+ for k := range t.symbolsMap {
+ if k == "" {
+ continue
+ }
+ delete(t.symbolsMap, k)
+ }
+}
+
+// desymbolizeLabels decodes label references against the given symbols into labels.
+func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
+ b.Reset()
+ for i := 0; i < len(labelRefs); i += 2 {
+ b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
+ }
+ b.Sort()
+ return b.Labels()
+}
diff --git a/prompb/io/prometheus/write/v2/symbols_test.go b/prompb/io/prometheus/write/v2/symbols_test.go
new file mode 100644
index 000000000..3d852e88f
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/symbols_test.go
@@ -0,0 +1,60 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+func TestSymbolsTable(t *testing.T) {
+ s := NewSymbolTable()
+ require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
+ require.Equal(t, uint32(0), s.Symbolize(""))
+ require.Equal(t, []string{""}, s.Symbols())
+
+ require.Equal(t, uint32(1), s.Symbolize("abc"))
+ require.Equal(t, []string{"", "abc"}, s.Symbols())
+
+ require.Equal(t, uint32(2), s.Symbolize("__name__"))
+ require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())
+
+ require.Equal(t, uint32(3), s.Symbolize("foo"))
+ require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())
+
+ s.Reset()
+ require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
+ require.Equal(t, uint32(0), s.Symbolize(""))
+
+ require.Equal(t, uint32(1), s.Symbolize("__name__"))
+ require.Equal(t, []string{"", "__name__"}, s.Symbols())
+
+ require.Equal(t, uint32(2), s.Symbolize("abc"))
+ require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())
+
+ ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
+ encoded := s.SymbolizeLabels(ls, nil)
+ require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
+ b := labels.NewScratchBuilder(len(encoded))
+ decoded := desymbolizeLabels(&b, encoded, s.Symbols())
+ require.Equal(t, ls, decoded)
+
+ // Different buf.
+ ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234")
+ encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5})
+ require.Equal(t, []uint32{1, 3, 6, 5}, encoded)
+}
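+
+func TestSymbolsTableRoundTrip(t *testing.T) {
+	// A sketch of the intended round trip: the sender symbolizes labels into
+	// reference pairs, the receiver desymbolizes them back using the shared
+	// symbols table.
+	st := NewSymbolTable()
+	ls := labels.FromStrings("__name__", "up", "job", "api")
+	refs := st.SymbolizeLabels(ls, nil)
+
+	b := labels.NewScratchBuilder(len(refs))
+	require.Equal(t, ls, desymbolizeLabels(&b, refs, st.Symbols()))
+}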
diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go
new file mode 100644
index 000000000..d6ea8398f
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/types.pb.go
@@ -0,0 +1,3241 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: io/prometheus/write/v2/types.proto
+
+package writev2
+
+import (
+ encoding_binary "encoding/binary"
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type Metadata_MetricType int32
+
+const (
+ Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0
+ Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1
+ Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2
+ Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3
+ Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4
+ Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5
+ Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6
+ Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7
+)
+
+var Metadata_MetricType_name = map[int32]string{
+ 0: "METRIC_TYPE_UNSPECIFIED",
+ 1: "METRIC_TYPE_COUNTER",
+ 2: "METRIC_TYPE_GAUGE",
+ 3: "METRIC_TYPE_HISTOGRAM",
+ 4: "METRIC_TYPE_GAUGEHISTOGRAM",
+ 5: "METRIC_TYPE_SUMMARY",
+ 6: "METRIC_TYPE_INFO",
+ 7: "METRIC_TYPE_STATESET",
+}
+
+var Metadata_MetricType_value = map[string]int32{
+ "METRIC_TYPE_UNSPECIFIED": 0,
+ "METRIC_TYPE_COUNTER": 1,
+ "METRIC_TYPE_GAUGE": 2,
+ "METRIC_TYPE_HISTOGRAM": 3,
+ "METRIC_TYPE_GAUGEHISTOGRAM": 4,
+ "METRIC_TYPE_SUMMARY": 5,
+ "METRIC_TYPE_INFO": 6,
+ "METRIC_TYPE_STATESET": 7,
+}
+
+func (x Metadata_MetricType) String() string {
+ return proto.EnumName(Metadata_MetricType_name, int32(x))
+}
+
+func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{4, 0}
+}
+
+type Histogram_ResetHint int32
+
+const (
+ Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0
+ Histogram_RESET_HINT_YES Histogram_ResetHint = 1
+ Histogram_RESET_HINT_NO Histogram_ResetHint = 2
+ Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3
+)
+
+var Histogram_ResetHint_name = map[int32]string{
+ 0: "RESET_HINT_UNSPECIFIED",
+ 1: "RESET_HINT_YES",
+ 2: "RESET_HINT_NO",
+ 3: "RESET_HINT_GAUGE",
+}
+
+var Histogram_ResetHint_value = map[string]int32{
+ "RESET_HINT_UNSPECIFIED": 0,
+ "RESET_HINT_YES": 1,
+ "RESET_HINT_NO": 2,
+ "RESET_HINT_GAUGE": 3,
+}
+
+func (x Histogram_ResetHint) String() string {
+ return proto.EnumName(Histogram_ResetHint_name, int32(x))
+}
+
+func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{5, 0}
+}
+
+// Request represents a request to write the given timeseries to a remote destination.
+// This message was introduced in the Remote Write 2.0 specification:
+// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
+//
+// The canonical Content-Type request header value for this message is
+// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
+//
+// NOTE: gogoproto options might change in future for this file, they
+// are not part of the spec proto (they only modify the generated Go code, not
+// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
+type Request struct {
+	// symbols contains a de-duplicated array of string elements used for various
+	// items in a Request message, like labels and metadata items. For the sender's
+	// convenience around empty values for optional fields like unit_ref, the
+	// symbols array MUST start with an empty string.
+	//
+	// To decode each of the symbolized strings, referenced by a "ref(s)" suffix,
+	// you need to look up the actual string by index in the symbols array. The
+	// order of strings is up to the sender. The receiver should not assume any
+	// particular encoding.
+ Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"`
+ // timeseries represents an array of distinct series with 0 or more samples.
+ Timeseries []TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+ return m.Size()
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+func (m *Request) GetSymbols() []string {
+ if m != nil {
+ return m.Symbols
+ }
+ return nil
+}
+
+func (m *Request) GetTimeseries() []TimeSeries {
+ if m != nil {
+ return m.Timeseries
+ }
+ return nil
+}
+
+// TimeSeries represents a single series.
+type TimeSeries struct {
+ // labels_refs is a list of label name-value pair references, encoded
+ // as indices to the Request.symbols array. This list's length is always
+ // a multiple of two, and the underlying labels should be sorted lexicographically.
+ //
+	// Note that there might be multiple TimeSeries objects in the same
+	// Request with the same labels, e.g. for different exemplars, metadata
+	// or created timestamp.
+ LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+ // Timeseries messages can either specify samples or (native) histogram samples
+ // (histogram field), but not both. For a typical sender (real-time metric
+ // streaming), in healthy cases, there will be only one sample or histogram.
+ //
+ // Samples and histograms are sorted by timestamp (older first).
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+ Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"`
+ // exemplars represents an optional set of exemplars attached to this series' samples.
+ Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
+ // metadata represents the metadata associated with the given series' samples.
+ Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
+ // created_timestamp represents an optional created timestamp associated with
+ // this series' samples in ms format, typically for counter or histogram type
+ // metrics. Created timestamp represents the time when the counter started
+ // counting (sometimes referred to as start timestamp), which can increase
+ // the accuracy of query results.
+ //
+ // Note that some receivers might require this and in return fail to
+ // ingest such samples within the Request.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion between time.Time and Prometheus timestamps.
+ //
+ // Note that the "optional" keyword is omitted due to
+ // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TimeSeries) Reset() { *m = TimeSeries{} }
+func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
+func (*TimeSeries) ProtoMessage() {}
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{1}
+}
+func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeries.Merge(m, src)
+}
+func (m *TimeSeries) XXX_Size() int {
+ return m.Size()
+}
+func (m *TimeSeries) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
+
+func (m *TimeSeries) GetLabelsRefs() []uint32 {
+ if m != nil {
+ return m.LabelsRefs
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetSamples() []Sample {
+ if m != nil {
+ return m.Samples
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetHistograms() []Histogram {
+ if m != nil {
+ return m.Histograms
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetExemplars() []Exemplar {
+ if m != nil {
+ return m.Exemplars
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetMetadata() Metadata {
+ if m != nil {
+ return m.Metadata
+ }
+ return Metadata{}
+}
+
+func (m *TimeSeries) GetCreatedTimestamp() int64 {
+ if m != nil {
+ return m.CreatedTimestamp
+ }
+ return 0
+}
+
+// Exemplar is additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+type Exemplar struct {
+ // labels_refs is an optional list of label name-value pair references, encoded
+	// as indices to the Request.symbols array. This list's length is always
+	// a multiple of two, and the underlying labels should be sorted lexicographically.
+	// As a best practice, if the exemplar references a trace, it should use the
+	// `trace_id` label name.
+ LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+ // value represents an exact example value. This can be useful when the exemplar
+ // is attached to a histogram, which only gives an estimated value through buckets.
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+ // timestamp represents an optional timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion between time.Time and Prometheus timestamps.
+ //
+ // Note that the "optional" keyword is omitted due to
+ // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Exemplar) Reset() { *m = Exemplar{} }
+func (m *Exemplar) String() string { return proto.CompactTextString(m) }
+func (*Exemplar) ProtoMessage() {}
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{2}
+}
+func (m *Exemplar) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Exemplar) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Exemplar.Merge(m, src)
+}
+func (m *Exemplar) XXX_Size() int {
+ return m.Size()
+}
+func (m *Exemplar) XXX_DiscardUnknown() {
+ xxx_messageInfo_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+
+func (m *Exemplar) GetLabelsRefs() []uint32 {
+ if m != nil {
+ return m.LabelsRefs
+ }
+ return nil
+}
+
+func (m *Exemplar) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *Exemplar) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+// Sample represents a series sample.
+type Sample struct {
+ // value of the sample.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ // timestamp represents timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion between time.Time and Prometheus timestamps.
+ Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Sample) Reset() { *m = Sample{} }
+func (m *Sample) String() string { return proto.CompactTextString(m) }
+func (*Sample) ProtoMessage() {}
+func (*Sample) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{3}
+}
+func (m *Sample) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Sample) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Sample.Merge(m, src)
+}
+func (m *Sample) XXX_Size() int {
+ return m.Size()
+}
+func (m *Sample) XXX_DiscardUnknown() {
+ xxx_messageInfo_Sample.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Sample proto.InternalMessageInfo
+
+func (m *Sample) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *Sample) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+type Metadata struct {
+ Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"`
+ // help_ref is a reference to the Request.symbols array representing help
+	// text for the metric. Help is optional; the reference should point to an
+	// empty string in such a case.
+ HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"`
+ // unit_ref is a reference to the Request.symbols array representing a unit
+	// for the metric. Unit is optional; the reference should point to an empty
+	// string in such a case.
+ UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{4}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func (m *Metadata) GetType() Metadata_MetricType {
+ if m != nil {
+ return m.Type
+ }
+ return Metadata_METRIC_TYPE_UNSPECIFIED
+}
+
+func (m *Metadata) GetHelpRef() uint32 {
+ if m != nil {
+ return m.HelpRef
+ }
+ return 0
+}
+
+func (m *Metadata) GetUnitRef() uint32 {
+ if m != nil {
+ return m.UnitRef
+ }
+ return 0
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both the usual
+// integer histogram and a float histogram.
+type Histogram struct {
+ // Types that are valid to be assigned to Count:
+ //
+ // *Histogram_CountInt
+ // *Histogram_CountFloat
+ Count isHistogram_Count `protobuf_oneof:"count"`
+ Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
+	// The schema defines the bucket layout. Currently, valid numbers
+	// are -53 and numbers in the range -4 <= n <= 8. More valid numbers might be
+	// added in the future for new bucketing layouts.
+ //
+ // The schema equal to -53 means custom buckets. See
+ // custom_values field description for more details.
+ //
+	// Values between -4 and 8 represent a base-2 bucket schema, where 1
+ // is a bucket boundary in each case, and then each power of two is
+ // divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
+ // each bucket boundary is the previous boundary times 2^(2^-n).
+ Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
+ ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
+ // Types that are valid to be assigned to ZeroCount:
+ //
+ // *Histogram_ZeroCountInt
+ // *Histogram_ZeroCountFloat
+ ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
+ // Negative Buckets.
+ NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
+ // Use either "negative_deltas" or "negative_counts", the former for
+ // regular histograms with integer counts, the latter for
+ // float histograms.
+ NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
+ NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
+ // Positive Buckets.
+ //
+ // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
+	// * The span offset+length points to an index of the custom_values array,
+	//   or to +Inf if pointing to the length of the array.
+ // * The counts and deltas have the same meaning as for exponential histograms.
+ PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
+ // Use either "positive_deltas" or "positive_counts", the former for
+ // regular histograms with integer counts, the latter for
+ // float histograms.
+ PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
+ PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
+ ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"`
+ // timestamp represents timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+	// for conversion between time.Time and Prometheus timestamps.
+ Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // custom_values is an additional field used by non-exponential bucketing layouts.
+ //
+ // For custom buckets (-53 schema value) custom_values specify monotonically
+ // increasing upper inclusive boundaries for the bucket counts with arbitrary
+ // widths for this histogram. In other words, custom_values represents custom,
+ // explicit bucketing that could have been converted from the classic histograms.
+ //
+	// Those bounds are then referenced by spans in positive_spans with corresponding positive
+	// counts or deltas (refer to positive_spans for more details). This way we can
+	// encode sparse histograms with custom bucketing (many buckets are often
+	// not used).
+ //
+	// Note that for custom bounds, even negative observations are placed in the positive
+	// counts to simplify the implementation and avoid ambiguity about where to place
+	// an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
+	// the zero bucket are unused if the schema indicates custom bucketing.
+ //
+	// For each upper boundary, the previous boundary represents the lower exclusive
+	// boundary for that bucket. The first element is the upper inclusive boundary
+	// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+	// This is similar to "le" label semantics on classic histograms. You may add a
+	// bucket with an upper bound of 0 to make sure that you really have no negative
+	// observations, but in practice, native histogram rendering treats the cases
+	// with and without a first upper boundary of 0 (and no negative counts) the same.
+ //
+ // The last element is not only the upper inclusive bound of the last regular
+ // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
+ CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{5}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Histogram) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Histogram.Merge(m, src)
+}
+func (m *Histogram) XXX_Size() int {
+ return m.Size()
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+ xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+type isHistogram_Count interface {
+ isHistogram_Count()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+type isHistogram_ZeroCount interface {
+ isHistogram_ZeroCount()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Histogram_CountInt struct {
+ CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"`
+}
+type Histogram_CountFloat struct {
+ CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"`
+}
+type Histogram_ZeroCountInt struct {
+ ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"`
+}
+type Histogram_ZeroCountFloat struct {
+ ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"`
+}
+
+func (*Histogram_CountInt) isHistogram_Count() {}
+func (*Histogram_CountFloat) isHistogram_Count() {}
+func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {}
+func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
+
+func (m *Histogram) GetCount() isHistogram_Count {
+ if m != nil {
+ return m.Count
+ }
+ return nil
+}
+func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
+ if m != nil {
+ return m.ZeroCount
+ }
+ return nil
+}
+
+func (m *Histogram) GetCountInt() uint64 {
+ if x, ok := m.GetCount().(*Histogram_CountInt); ok {
+ return x.CountInt
+ }
+ return 0
+}
+
+func (m *Histogram) GetCountFloat() float64 {
+ if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
+ return x.CountFloat
+ }
+ return 0
+}
+
+func (m *Histogram) GetSum() float64 {
+ if m != nil {
+ return m.Sum
+ }
+ return 0
+}
+
+func (m *Histogram) GetSchema() int32 {
+ if m != nil {
+ return m.Schema
+ }
+ return 0
+}
+
+func (m *Histogram) GetZeroThreshold() float64 {
+ if m != nil {
+ return m.ZeroThreshold
+ }
+ return 0
+}
+
+func (m *Histogram) GetZeroCountInt() uint64 {
+ if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
+ return x.ZeroCountInt
+ }
+ return 0
+}
+
+func (m *Histogram) GetZeroCountFloat() float64 {
+ if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
+ return x.ZeroCountFloat
+ }
+ return 0
+}
+
+func (m *Histogram) GetNegativeSpans() []BucketSpan {
+ if m != nil {
+ return m.NegativeSpans
+ }
+ return nil
+}
+
+func (m *Histogram) GetNegativeDeltas() []int64 {
+ if m != nil {
+ return m.NegativeDeltas
+ }
+ return nil
+}
+
+func (m *Histogram) GetNegativeCounts() []float64 {
+ if m != nil {
+ return m.NegativeCounts
+ }
+ return nil
+}
+
+func (m *Histogram) GetPositiveSpans() []BucketSpan {
+ if m != nil {
+ return m.PositiveSpans
+ }
+ return nil
+}
+
+func (m *Histogram) GetPositiveDeltas() []int64 {
+ if m != nil {
+ return m.PositiveDeltas
+ }
+ return nil
+}
+
+func (m *Histogram) GetPositiveCounts() []float64 {
+ if m != nil {
+ return m.PositiveCounts
+ }
+ return nil
+}
+
+func (m *Histogram) GetResetHint() Histogram_ResetHint {
+ if m != nil {
+ return m.ResetHint
+ }
+ return Histogram_RESET_HINT_UNSPECIFIED
+}
+
+func (m *Histogram) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+func (m *Histogram) GetCustomValues() []float64 {
+ if m != nil {
+ return m.CustomValues
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Histogram) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Histogram_CountInt)(nil),
+ (*Histogram_CountFloat)(nil),
+ (*Histogram_ZeroCountInt)(nil),
+ (*Histogram_ZeroCountFloat)(nil),
+ }
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+type BucketSpan struct {
+ Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
+ Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BucketSpan) Reset() { *m = BucketSpan{} }
+func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
+func (*BucketSpan) ProtoMessage() {}
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f139519efd9fa8d7, []int{6}
+}
+func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *BucketSpan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BucketSpan.Merge(m, src)
+}
+func (m *BucketSpan) XXX_Size() int {
+ return m.Size()
+}
+func (m *BucketSpan) XXX_DiscardUnknown() {
+ xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+
+func (m *BucketSpan) GetOffset() int32 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+func (m *BucketSpan) GetLength() uint32 {
+ if m != nil {
+ return m.Length
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.write.v2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value)
+ proto.RegisterEnum("io.prometheus.write.v2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
+ proto.RegisterType((*Request)(nil), "io.prometheus.write.v2.Request")
+ proto.RegisterType((*TimeSeries)(nil), "io.prometheus.write.v2.TimeSeries")
+ proto.RegisterType((*Exemplar)(nil), "io.prometheus.write.v2.Exemplar")
+ proto.RegisterType((*Sample)(nil), "io.prometheus.write.v2.Sample")
+ proto.RegisterType((*Metadata)(nil), "io.prometheus.write.v2.Metadata")
+ proto.RegisterType((*Histogram)(nil), "io.prometheus.write.v2.Histogram")
+ proto.RegisterType((*BucketSpan)(nil), "io.prometheus.write.v2.BucketSpan")
+}
+
+func init() {
+ proto.RegisterFile("io/prometheus/write/v2/types.proto", fileDescriptor_f139519efd9fa8d7)
+}
+
+var fileDescriptor_f139519efd9fa8d7 = []byte{
+ // 926 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xe3, 0x44,
+ 0x14, 0xed, 0xc4, 0x69, 0x3e, 0x6e, 0x9a, 0xac, 0x33, 0xb4, 0x5d, 0x6f, 0x81, 0x6c, 0xd6, 0x08,
+ 0x88, 0x58, 0x29, 0x91, 0xc2, 0xeb, 0x0a, 0xd4, 0xb4, 0x6e, 0x93, 0x95, 0x92, 0xac, 0x26, 0x2e,
+ 0x52, 0x79, 0xb1, 0xdc, 0x64, 0x92, 0x58, 0xd8, 0xb1, 0xf1, 0x4c, 0x02, 0xe5, 0xf7, 0xf1, 0xb0,
+ 0x8f, 0xfc, 0x01, 0x10, 0xf4, 0x9d, 0xff, 0x80, 0x66, 0xfc, 0xd9, 0x42, 0xbb, 0xe2, 0x6d, 0xe6,
+ 0xdc, 0x73, 0xee, 0x3d, 0xb9, 0xbe, 0x77, 0x02, 0xba, 0xe3, 0xf7, 0x82, 0xd0, 0xf7, 0x28, 0x5f,
+ 0xd3, 0x2d, 0xeb, 0xfd, 0x14, 0x3a, 0x9c, 0xf6, 0x76, 0xfd, 0x1e, 0xbf, 0x0d, 0x28, 0xeb, 0x06,
+ 0xa1, 0xcf, 0x7d, 0x7c, 0xec, 0xf8, 0xdd, 0x8c, 0xd3, 0x95, 0x9c, 0xee, 0xae, 0x7f, 0x72, 0xb8,
+ 0xf2, 0x57, 0xbe, 0xa4, 0xf4, 0xc4, 0x29, 0x62, 0xeb, 0x0c, 0xca, 0x84, 0xfe, 0xb8, 0xa5, 0x8c,
+ 0x63, 0x0d, 0xca, 0xec, 0xd6, 0xbb, 0xf1, 0x5d, 0xa6, 0x15, 0xdb, 0x4a, 0xa7, 0x4a, 0x92, 0x2b,
+ 0x1e, 0x02, 0x70, 0xc7, 0xa3, 0x8c, 0x86, 0x0e, 0x65, 0xda, 0x7e, 0x5b, 0xe9, 0xd4, 0xfa, 0x7a,
+ 0xf7, 0xbf, 0xeb, 0x74, 0x4d, 0xc7, 0xa3, 0x33, 0xc9, 0x1c, 0x14, 0xdf, 0xff, 0xf1, 0x72, 0x8f,
+ 0xe4, 0xb4, 0x6f, 0x8b, 0x15, 0xa4, 0x16, 0xf5, 0xbf, 0x0b, 0x00, 0x19, 0x0d, 0xbf, 0x84, 0x9a,
+ 0x6b, 0xdf, 0x50, 0x97, 0x59, 0x21, 0x5d, 0x32, 0x0d, 0xb5, 0x95, 0x4e, 0x9d, 0x40, 0x04, 0x11,
+ 0xba, 0x64, 0xf8, 0x1b, 0x28, 0x33, 0xdb, 0x0b, 0x5c, 0xca, 0xb4, 0x82, 0x2c, 0xde, 0x7a, 0xac,
+ 0xf8, 0x4c, 0xd2, 0xe2, 0xc2, 0x89, 0x08, 0x5f, 0x02, 0xac, 0x1d, 0xc6, 0xfd, 0x55, 0x68, 0x7b,
+ 0x4c, 0x53, 0x64, 0x8a, 0x57, 0x8f, 0xa5, 0x18, 0x26, 0xcc, 0xc4, 0x7e, 0x26, 0xc5, 0xe7, 0x50,
+ 0xa5, 0x3f, 0x53, 0x2f, 0x70, 0xed, 0x30, 0x6a, 0x52, 0xad, 0xdf, 0x7e, 0x2c, 0x8f, 0x11, 0x13,
+ 0xe3, 0x34, 0x99, 0x10, 0x0f, 0xa0, 0xe2, 0x51, 0x6e, 0x2f, 0x6c, 0x6e, 0x6b, 0xfb, 0x6d, 0xf4,
+ 0x54, 0x92, 0x71, 0xcc, 0x8b, 0x93, 0xa4, 0x3a, 0xfc, 0x1a, 0x9a, 0xf3, 0x90, 0xda, 0x9c, 0x2e,
+ 0x2c, 0xd9, 0x5e, 0x6e, 0x7b, 0x81, 0x56, 0x6a, 0xa3, 0x8e, 0x42, 0xd4, 0x38, 0x60, 0x26, 0xb8,
+ 0x6e, 0x41, 0x25, 0x71, 0xf3, 0xe1, 0x66, 0x1f, 0xc2, 0xfe, 0xce, 0x76, 0xb7, 0x54, 0x2b, 0xb4,
+ 0x51, 0x07, 0x91, 0xe8, 0x82, 0x3f, 0x81, 0x6a, 0x56, 0x47, 0x91, 0x75, 0x32, 0x40, 0x7f, 0x03,
+ 0xa5, 0xa8, 0xf3, 0x99, 0x1a, 0x3d, 0xaa, 0x2e, 0x3c, 0x54, 0xff, 0x55, 0x80, 0x4a, 0xf2, 0x43,
+ 0xf1, 0xb7, 0x50, 0x14, 0xd3, 0x2c, 0xf5, 0x8d, 0xfe, 0xeb, 0x0f, 0x35, 0x46, 0x1c, 0x42, 0x67,
+ 0x6e, 0xde, 0x06, 0x94, 0x48, 0x21, 0x7e, 0x01, 0x95, 0x35, 0x75, 0x03, 0xf1, 0xf3, 0xa4, 0xd1,
+ 0x3a, 0x29, 0x8b, 0x3b, 0xa1, 0x4b, 0x11, 0xda, 0x6e, 0x1c, 0x2e, 0x43, 0xc5, 0x28, 0x24, 0xee,
+ 0x84, 0x2e, 0xf5, 0xdf, 0x11, 0x40, 0x96, 0x0a, 0x7f, 0x0c, 0xcf, 0xc7, 0x86, 0x49, 0x46, 0x67,
+ 0x96, 0x79, 0xfd, 0xce, 0xb0, 0xae, 0x26, 0xb3, 0x77, 0xc6, 0xd9, 0xe8, 0x62, 0x64, 0x9c, 0xab,
+ 0x7b, 0xf8, 0x39, 0x7c, 0x94, 0x0f, 0x9e, 0x4d, 0xaf, 0x26, 0xa6, 0x41, 0x54, 0x84, 0x8f, 0xa0,
+ 0x99, 0x0f, 0x5c, 0x9e, 0x5e, 0x5d, 0x1a, 0x6a, 0x01, 0xbf, 0x80, 0xa3, 0x3c, 0x3c, 0x1c, 0xcd,
+ 0xcc, 0xe9, 0x25, 0x39, 0x1d, 0xab, 0x0a, 0x6e, 0xc1, 0xc9, 0xbf, 0x14, 0x59, 0xbc, 0xf8, 0xb0,
+ 0xd4, 0xec, 0x6a, 0x3c, 0x3e, 0x25, 0xd7, 0xea, 0x3e, 0x3e, 0x04, 0x35, 0x1f, 0x18, 0x4d, 0x2e,
+ 0xa6, 0x6a, 0x09, 0x6b, 0x70, 0x78, 0x8f, 0x6e, 0x9e, 0x9a, 0xc6, 0xcc, 0x30, 0xd5, 0xb2, 0xfe,
+ 0x6b, 0x09, 0xaa, 0xe9, 0x64, 0xe3, 0x4f, 0xa1, 0x3a, 0xf7, 0xb7, 0x1b, 0x6e, 0x39, 0x1b, 0x2e,
+ 0x3b, 0x5d, 0x1c, 0xee, 0x91, 0x8a, 0x84, 0x46, 0x1b, 0x8e, 0x5f, 0x41, 0x2d, 0x0a, 0x2f, 0x5d,
+ 0xdf, 0xe6, 0xd1, 0x20, 0x0c, 0xf7, 0x08, 0x48, 0xf0, 0x42, 0x60, 0x58, 0x05, 0x85, 0x6d, 0x3d,
+ 0xd9, 0x60, 0x44, 0xc4, 0x11, 0x1f, 0x43, 0x89, 0xcd, 0xd7, 0xd4, 0xb3, 0x65, 0x6b, 0x9b, 0x24,
+ 0xbe, 0xe1, 0xcf, 0xa1, 0xf1, 0x0b, 0x0d, 0x7d, 0x8b, 0xaf, 0x43, 0xca, 0xd6, 0xbe, 0xbb, 0x90,
+ 0x33, 0x8f, 0x48, 0x5d, 0xa0, 0x66, 0x02, 0xe2, 0x2f, 0x62, 0x5a, 0xe6, 0xab, 0x24, 0x7d, 0x21,
+ 0x72, 0x20, 0xf0, 0xb3, 0xc4, 0xdb, 0x57, 0xa0, 0xe6, 0x78, 0x91, 0xc1, 0xb2, 0x34, 0x88, 0x48,
+ 0x23, 0x65, 0x46, 0x26, 0xa7, 0xd0, 0xd8, 0xd0, 0x95, 0xcd, 0x9d, 0x1d, 0xb5, 0x58, 0x60, 0x6f,
+ 0x98, 0x56, 0x79, 0xfa, 0xed, 0x1a, 0x6c, 0xe7, 0x3f, 0x50, 0x3e, 0x0b, 0xec, 0x4d, 0xbc, 0x70,
+ 0xf5, 0x44, 0x2f, 0x30, 0x86, 0xbf, 0x84, 0x67, 0x69, 0xc2, 0x05, 0x75, 0xb9, 0xcd, 0xb4, 0x6a,
+ 0x5b, 0xe9, 0x60, 0x92, 0xd6, 0x39, 0x97, 0xe8, 0x3d, 0xa2, 0x74, 0xca, 0x34, 0x68, 0x2b, 0x1d,
+ 0x94, 0x11, 0xa5, 0x4d, 0x26, 0x2c, 0x06, 0x3e, 0x73, 0x72, 0x16, 0x6b, 0xff, 0xd7, 0x62, 0xa2,
+ 0x4f, 0x2d, 0xa6, 0x09, 0x63, 0x8b, 0x07, 0x91, 0xc5, 0x04, 0xce, 0x2c, 0xa6, 0xc4, 0xd8, 0x62,
+ 0x3d, 0xb2, 0x98, 0xc0, 0xb1, 0xc5, 0xb7, 0x00, 0x21, 0x65, 0x94, 0x5b, 0x6b, 0xf1, 0x55, 0x1a,
+ 0x4f, 0xef, 0x65, 0x3a, 0x63, 0x5d, 0x22, 0x34, 0x43, 0x67, 0xc3, 0x49, 0x35, 0x4c, 0x8e, 0xf7,
+ 0x1f, 0x82, 0x67, 0x0f, 0x1e, 0x02, 0xfc, 0x19, 0xd4, 0xe7, 0x5b, 0xc6, 0x7d, 0xcf, 0x92, 0xcf,
+ 0x06, 0xd3, 0x54, 0x69, 0xe8, 0x20, 0x02, 0xbf, 0x93, 0x98, 0xbe, 0x80, 0x6a, 0x9a, 0x1a, 0x9f,
+ 0xc0, 0x31, 0x11, 0x13, 0x6e, 0x0d, 0x47, 0x13, 0xf3, 0xc1, 0x9a, 0x62, 0x68, 0xe4, 0x62, 0xd7,
+ 0xc6, 0x4c, 0x45, 0xb8, 0x09, 0xf5, 0x1c, 0x36, 0x99, 0xaa, 0x05, 0xb1, 0x49, 0x39, 0x28, 0xda,
+ 0x59, 0x65, 0x50, 0x86, 0x7d, 0xd9, 0x94, 0xc1, 0x01, 0x40, 0x36, 0x6f, 0xfa, 0x1b, 0x80, 0xec,
+ 0x03, 0x88, 0x91, 0xf7, 0x97, 0x4b, 0x46, 0xa3, 0x1d, 0x6a, 0x92, 0xf8, 0x26, 0x70, 0x97, 0x6e,
+ 0x56, 0x7c, 0x2d, 0x57, 0xa7, 0x4e, 0xe2, 0xdb, 0xe0, 0xe8, 0xfd, 0x5d, 0x0b, 0xfd, 0x76, 0xd7,
+ 0x42, 0x7f, 0xde, 0xb5, 0xd0, 0xf7, 0x65, 0xd9, 0xb4, 0x5d, 0xff, 0xa6, 0x24, 0xff, 0x8a, 0xbf,
+ 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xfc, 0x93, 0x1c, 0xde, 0x07, 0x00, 0x00,
+}
+
+func (m *Request) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Timeseries) > 0 {
+ for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Symbols) > 0 {
+ for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Symbols[iNdEx])
+ copy(dAtA[i:], m.Symbols[iNdEx])
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.CreatedTimestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
+ i--
+ dAtA[i] = 0x30
+ }
+ {
+ size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Exemplars) > 0 {
+ for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Histograms) > 0 {
+ for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Samples) > 0 {
+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.LabelsRefs) > 0 {
+ dAtA3 := make([]byte, len(m.LabelsRefs)*10)
+ var j2 int
+ for _, num := range m.LabelsRefs {
+ for num >= 1<<7 {
+ dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j2++
+ }
+ dAtA3[j2] = uint8(num)
+ j2++
+ }
+ i -= j2
+ copy(dAtA[i:], dAtA3[:j2])
+ i = encodeVarintTypes(dAtA, i, uint64(j2))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Exemplar) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x11
+ }
+ if len(m.LabelsRefs) > 0 {
+ dAtA5 := make([]byte, len(m.LabelsRefs)*10)
+ var j4 int
+ for _, num := range m.LabelsRefs {
+ for num >= 1<<7 {
+ dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j4++
+ }
+ dAtA5[j4] = uint8(num)
+ j4++
+ }
+ i -= j4
+ copy(dAtA[i:], dAtA5[:j4])
+ i = encodeVarintTypes(dAtA, i, uint64(j4))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Sample) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x9
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.UnitRef != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.UnitRef))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.HelpRef != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.HelpRef))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Type != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Histogram) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.CustomValues) > 0 {
+ for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- {
+ f6 := math.Float64bits(float64(m.CustomValues[iNdEx]))
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6))
+ }
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x78
+ }
+ if m.ResetHint != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint))
+ i--
+ dAtA[i] = 0x70
+ }
+ if len(m.PositiveCounts) > 0 {
+ for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
+ f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7))
+ }
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.PositiveDeltas) > 0 {
+ var j8 int
+ dAtA10 := make([]byte, len(m.PositiveDeltas)*10)
+ for _, num := range m.PositiveDeltas {
+ x9 := (uint64(num) << 1) ^ uint64((num >> 63))
+ for x9 >= 1<<7 {
+ dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80)
+ j8++
+ x9 >>= 7
+ }
+ dAtA10[j8] = uint8(x9)
+ j8++
+ }
+ i -= j8
+ copy(dAtA[i:], dAtA10[:j8])
+ i = encodeVarintTypes(dAtA, i, uint64(j8))
+ i--
+ dAtA[i] = 0x62
+ }
+ if len(m.PositiveSpans) > 0 {
+ for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ }
+ if len(m.NegativeCounts) > 0 {
+ for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
+ f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11))
+ }
+ i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.NegativeDeltas) > 0 {
+ var j12 int
+ dAtA14 := make([]byte, len(m.NegativeDeltas)*10)
+ for _, num := range m.NegativeDeltas {
+ x13 := (uint64(num) << 1) ^ uint64((num >> 63))
+ for x13 >= 1<<7 {
+ dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80)
+ j12++
+ x13 >>= 7
+ }
+ dAtA14[j12] = uint8(x13)
+ j12++
+ }
+ i -= j12
+ copy(dAtA[i:], dAtA14[:j12])
+ i = encodeVarintTypes(dAtA, i, uint64(j12))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.NegativeSpans) > 0 {
+ for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTypes(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if m.ZeroCount != nil {
+ {
+ size := m.ZeroCount.Size()
+ i -= size
+ if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if m.ZeroThreshold != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
+ i--
+ dAtA[i] = 0x29
+ }
+ if m.Schema != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31))))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Sum != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
+ i--
+ dAtA[i] = 0x19
+ }
+ if m.Count != nil {
+ {
+ size := m.Count.Size()
+ i -= size
+ if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintTypes(dAtA, i, uint64(m.CountInt))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat))))
+ i--
+ dAtA[i] = 0x11
+ return len(dAtA) - i, nil
+}
+func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt))
+ i--
+ dAtA[i] = 0x30
+ return len(dAtA) - i, nil
+}
+func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat))))
+ i--
+ dAtA[i] = 0x39
+ return len(dAtA) - i, nil
+}
+func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Length != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64(m.Length))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Offset != 0 {
+ i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTypes(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Request) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Symbols) > 0 {
+ for _, s := range m.Symbols {
+ l = len(s)
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.Timeseries) > 0 {
+ for _, e := range m.Timeseries {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *TimeSeries) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.LabelsRefs) > 0 {
+ l = 0
+ for _, e := range m.LabelsRefs {
+ l += sovTypes(uint64(e))
+ }
+ n += 1 + sovTypes(uint64(l)) + l
+ }
+ if len(m.Samples) > 0 {
+ for _, e := range m.Samples {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.Histograms) > 0 {
+ for _, e := range m.Histograms {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.Exemplars) > 0 {
+ for _, e := range m.Exemplars {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ l = m.Metadata.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ if m.CreatedTimestamp != 0 {
+ n += 1 + sovTypes(uint64(m.CreatedTimestamp))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Exemplar) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.LabelsRefs) > 0 {
+ l = 0
+ for _, e := range m.LabelsRefs {
+ l += sovTypes(uint64(e))
+ }
+ n += 1 + sovTypes(uint64(l)) + l
+ }
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovTypes(uint64(m.Timestamp))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Sample) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovTypes(uint64(m.Timestamp))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Metadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovTypes(uint64(m.Type))
+ }
+ if m.HelpRef != 0 {
+ n += 1 + sovTypes(uint64(m.HelpRef))
+ }
+ if m.UnitRef != 0 {
+ n += 1 + sovTypes(uint64(m.UnitRef))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Histogram) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Count != nil {
+ n += m.Count.Size()
+ }
+ if m.Sum != 0 {
+ n += 9
+ }
+ if m.Schema != 0 {
+ n += 1 + sozTypes(uint64(m.Schema))
+ }
+ if m.ZeroThreshold != 0 {
+ n += 9
+ }
+ if m.ZeroCount != nil {
+ n += m.ZeroCount.Size()
+ }
+ if len(m.NegativeSpans) > 0 {
+ for _, e := range m.NegativeSpans {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.NegativeDeltas) > 0 {
+ l = 0
+ for _, e := range m.NegativeDeltas {
+ l += sozTypes(uint64(e))
+ }
+ n += 1 + sovTypes(uint64(l)) + l
+ }
+ if len(m.NegativeCounts) > 0 {
+ n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8
+ }
+ if len(m.PositiveSpans) > 0 {
+ for _, e := range m.PositiveSpans {
+ l = e.Size()
+ n += 1 + l + sovTypes(uint64(l))
+ }
+ }
+ if len(m.PositiveDeltas) > 0 {
+ l = 0
+ for _, e := range m.PositiveDeltas {
+ l += sozTypes(uint64(e))
+ }
+ n += 1 + sovTypes(uint64(l)) + l
+ }
+ if len(m.PositiveCounts) > 0 {
+ n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8
+ }
+ if m.ResetHint != 0 {
+ n += 1 + sovTypes(uint64(m.ResetHint))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovTypes(uint64(m.Timestamp))
+ }
+ if len(m.CustomValues) > 0 {
+ n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Histogram_CountInt) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovTypes(uint64(m.CountInt))
+ return n
+}
+func (m *Histogram_CountFloat) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 9
+ return n
+}
+func (m *Histogram_ZeroCountInt) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovTypes(uint64(m.ZeroCountInt))
+ return n
+}
+func (m *Histogram_ZeroCountFloat) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 9
+ return n
+}
+func (m *BucketSpan) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Offset != 0 {
+ n += 1 + sozTypes(uint64(m.Offset))
+ }
+ if m.Length != 0 {
+ n += 1 + sovTypes(uint64(m.Length))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovTypes(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTypes(x uint64) (n int) {
+ return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Request: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Timeseries = append(m.Timeseries, TimeSeries{})
+ if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TimeSeries) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.LabelsRefs) == 0 {
+ m.LabelsRefs = make([]uint32, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Samples = append(m.Samples, Sample{})
+ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Histograms = append(m.Histograms, Histogram{})
+ if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exemplars = append(m.Exemplars, Exemplar{})
+ if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
+ }
+ m.CreatedTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CreatedTimestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Exemplar) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.LabelsRefs) == 0 {
+ m.LabelsRefs = make([]uint32, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
+ }
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Sample) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Sample: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= Metadata_MetricType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType)
+ }
+ m.HelpRef = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.HelpRef |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType)
+ }
+ m.UnitRef = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UnitRef |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Histogram) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType)
+ }
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Count = &Histogram_CountInt{v}
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))}
+ case 3:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Sum = float64(math.Float64frombits(v))
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
+ m.Schema = v
+ case 5:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.ZeroThreshold = float64(math.Float64frombits(v))
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType)
+ }
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ZeroCount = &Histogram_ZeroCountInt{v}
+ case 7:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))}
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NegativeSpans = append(m.NegativeSpans, BucketSpan{})
+ if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+ m.NegativeDeltas = append(m.NegativeDeltas, int64(v))
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.NegativeDeltas) == 0 {
+ m.NegativeDeltas = make([]int64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+ m.NegativeDeltas = append(m.NegativeDeltas, int64(v))
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType)
+ }
+ case 10:
+ if wireType == 1 {
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.NegativeCounts = append(m.NegativeCounts, v2)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ elementCount = packedLen / 8
+ if elementCount != 0 && len(m.NegativeCounts) == 0 {
+ m.NegativeCounts = make([]float64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.NegativeCounts = append(m.NegativeCounts, v2)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType)
+ }
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PositiveSpans = append(m.PositiveSpans, BucketSpan{})
+ if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+ m.PositiveDeltas = append(m.PositiveDeltas, int64(v))
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.PositiveDeltas) == 0 {
+ m.PositiveDeltas = make([]int64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63)
+ m.PositiveDeltas = append(m.PositiveDeltas, int64(v))
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType)
+ }
+ case 13:
+ if wireType == 1 {
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.PositiveCounts = append(m.PositiveCounts, v2)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ elementCount = packedLen / 8
+ if elementCount != 0 && len(m.PositiveCounts) == 0 {
+ m.PositiveCounts = make([]float64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.PositiveCounts = append(m.PositiveCounts, v2)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType)
+ }
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType)
+ }
+ m.ResetHint = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 15:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 16:
+ if wireType == 1 {
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.CustomValues = append(m.CustomValues, v2)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthTypes
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ elementCount = packedLen / 8
+ if elementCount != 0 && len(m.CustomValues) == 0 {
+ m.CustomValues = make([]float64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ v2 := float64(math.Float64frombits(v))
+ m.CustomValues = append(m.CustomValues, v2)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType)
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BucketSpan) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
+ m.Offset = v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType)
+ }
+ m.Length = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Length |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTypes(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTypes
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTypes(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTypes
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTypes
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTypes
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTypes
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto
new file mode 100644
index 000000000..0cc7b8bc4
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/types.proto
@@ -0,0 +1,260 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2
+
+syntax = "proto3";
+package io.prometheus.write.v2;
+
+option go_package = "writev2";
+
+import "gogoproto/gogo.proto";
+
+// Request represents a request to write the given timeseries to a remote destination.
+// This message was introduced in the Remote Write 2.0 specification:
+// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
+//
+// The canonical Content-Type request header value for this message is
+// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
+//
+// NOTE: gogoproto options might change in the future for this file; they
+// are not part of the spec proto (they only modify the generated Go code, not
+// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
+message Request {
+ // Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message
+ // for the deterministic interop between those two; see types_test.go for details.
+ // Generally it's not needed, because Receivers must use the Content-Type header, but we want to
+ // be sympathetic to adopters with mistaken implementations and have a deterministic error (an
+ // empty message if you use the wrong proto schema).
+ reserved 1 to 3;
+
+ // symbols contains a de-duplicated array of string elements used for various
+ // items in a Request message, like labels and metadata items. For the sender's convenience
+ // around empty values for optional fields like unit_ref, the symbols array MUST start with
+ // an empty string.
+ //
+ // To decode each of the symbolized strings, referenced by fields with a "ref(s)" suffix,
+ // you need to look up the actual string by index in the symbols array. The order of
+ // strings is up to the sender. The receiver should not assume any particular encoding.
+ // (A sketch of building this array follows this message.)
+ repeated string symbols = 4;
+ // timeseries represents an array of distinct series with 0 or more samples.
+ repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false];
+}
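+
+// Editor's note: a minimal, hypothetical Go sketch (not part of the spec text)
+// of how a sender might intern strings into the symbols array and reference
+// them, assuming the "writev2" package generated from this file:
+//
+//	syms := []string{""}            // Symbols array MUST start with the empty string.
+//	idx := map[string]uint32{"": 0} // Interning table: string -> index in syms.
+//	ref := func(s string) uint32 {  // Deduplicating lookup-or-append.
+//	    if i, ok := idx[s]; ok {
+//	        return i
+//	    }
+//	    idx[s] = uint32(len(syms))
+//	    syms = append(syms, s)
+//	    return idx[s]
+//	}
+//	ts := writev2.TimeSeries{
+//	    // Interleaved name/value references; label names sorted lexicographically.
+//	    LabelsRefs: []uint32{ref("__name__"), ref("http_requests_total")},
+//	    Samples:    []writev2.Sample{{Value: 1, Timestamp: 1700000000000}},
+//	}
+//	req := writev2.Request{Symbols: syms, Timeseries: []writev2.TimeSeries{ts}}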
+
+// TimeSeries represents a single series.
+message TimeSeries {
+ // labels_refs is a list of label name-value pair references, encoded
+ // as indices to the Request.symbols array. This list's length is always
+ // a multiple of two, and the underlying labels should be sorted lexicographically.
+ //
+ // Note that there might be multiple TimeSeries objects in the same
+ // Request with the same labels, e.g. for different exemplars, metadata
+ // or created timestamp.
+ repeated uint32 labels_refs = 1;
+
+ // TimeSeries messages can specify either samples or (native) histogram samples
+ // (histograms field), but not both. For a typical sender (real-time metric
+ // streaming), in healthy cases, there will be only one sample or histogram.
+ //
+ // Samples and histograms are sorted by timestamp (older first).
+ repeated Sample samples = 2 [(gogoproto.nullable) = false];
+ repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
+
+ // exemplars represents an optional set of exemplars attached to this series' samples.
+ repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
+
+ // metadata represents the metadata associated with the given series' samples.
+ Metadata metadata = 5 [(gogoproto.nullable) = false];
+
+ // created_timestamp represents an optional created timestamp associated with
+ // this series' samples in ms format, typically for counter or histogram type
+ // metrics. Created timestamp represents the time when the counter started
+ // counting (sometimes referred to as start timestamp), which can increase
+ // the accuracy of query results.
+ //
+ // Note that some receivers might require this and, if it is missing, fail to
+ // ingest such samples within the Request.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion between time.Time and the Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to
+ // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+ // A zero value means the value is not set. If you need to use exactly zero for
+ // the timestamp, use 1 millisecond before or after.
+ int64 created_timestamp = 6;
+}
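+
+// Editor's note: a hypothetical sketch of deriving created_timestamp in Go via
+// the timestamp package referenced above (counterStartTime being an assumed
+// time.Time for illustration):
+//
+//	import "github.com/prometheus/prometheus/model/timestamp"
+//
+//	ts.CreatedTimestamp = timestamp.FromTime(counterStartTime) // Milliseconds since epoch.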
+
+// Exemplar is additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+message Exemplar {
+ // labels_refs is an optional list of label name-value pair references, encoded
+ // as indices to the Request.symbols array. This list's length is always
+ // a multiple of two, and the underlying labels should be sorted lexicographically.
+ // If the exemplar references a trace, it should use the `trace_id` label name, as a best practice.
+ repeated uint32 labels_refs = 1;
+ // value represents an exact example value. This can be useful when the exemplar
+ // is attached to a histogram, which only gives an estimated value through buckets.
+ double value = 2;
+ // timestamp represents an optional timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion between time.Time and the Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to
+ // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+ // A zero value means the value is not set. If you need to use exactly zero for
+ // the timestamp, use 1 millisecond before or after.
+ int64 timestamp = 3;
+}
+
+// Sample represents a series sample.
+message Sample {
+ // value of the sample.
+ double value = 1;
+ // timestamp represents the timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion between time.Time and the Prometheus timestamp.
+ int64 timestamp = 2;
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+message Metadata {
+ enum MetricType {
+ METRIC_TYPE_UNSPECIFIED = 0;
+ METRIC_TYPE_COUNTER = 1;
+ METRIC_TYPE_GAUGE = 2;
+ METRIC_TYPE_HISTOGRAM = 3;
+ METRIC_TYPE_GAUGEHISTOGRAM = 4;
+ METRIC_TYPE_SUMMARY = 5;
+ METRIC_TYPE_INFO = 6;
+ METRIC_TYPE_STATESET = 7;
+ }
+ MetricType type = 1;
+ // help_ref is a reference to the Request.symbols array representing help
+ // text for the metric. Help is optional; the reference should point to an empty string in
+ // such a case.
+ uint32 help_ref = 3;
+ // unit_ref is a reference to the Request.symbols array representing a unit
+ // for the metric. Unit is optional; the reference should point to an empty string in
+ // such a case.
+ uint32 unit_ref = 4;
+}
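+
+// Editor's note: a hypothetical illustration of the refs above. With
+// Symbols = ["", "Total HTTP requests."], a Metadata message with
+// type = METRIC_TYPE_COUNTER, help_ref = 1 and unit_ref = 0 carries its help
+// text via symbols index 1 and signals "no unit" by pointing unit_ref at the
+// leading empty string.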
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both the usual
+// integer histogram and a float histogram.
+message Histogram {
+ enum ResetHint {
+ RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
+ RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset.
+ RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram.
+ RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen.
+ }
+
+ oneof count { // Count of observations in the histogram.
+ uint64 count_int = 1;
+ double count_float = 2;
+ }
+ double sum = 3; // Sum of observations in the histogram.
+
+ // The schema defines the bucket schema. Currently, valid numbers
+ // are -53 and numbers in the range -4 <= n <= 8. More valid numbers might be
+ // added in the future for new bucketing layouts.
+ //
+ // A schema equal to -53 means custom buckets. See the
+ // custom_values field description for more details.
+ //
+ // Values between -4 and 8 represent a base-2 bucket schema, where 1
+ // is a bucket boundary in each case, and each power of two is then
+ // divided into 2^n logarithmic buckets (n being the schema value). In other
+ // words, each bucket boundary is the previous boundary times 2^(2^-n).
+ sint32 schema = 4;
+ double zero_threshold = 5; // Breadth of the zero bucket.
+ oneof zero_count { // Count in zero bucket.
+ uint64 zero_count_int = 6;
+ double zero_count_float = 7;
+ }
+
+ // Negative Buckets.
+ repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
+ // Use either "negative_deltas" or "negative_counts", the former for
+ // regular histograms with integer counts, the latter for
+ // float histograms.
+ repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ repeated double negative_counts = 10; // Absolute count of each bucket.
+
+ // Positive Buckets.
+ //
+ // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
+ // * The span offset+length points to the index of the custom_values array
+ // or +Inf if pointing to the length of the array.
+ // * The counts and deltas have the same meaning as for exponential histograms.
+ repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
+ // Use either "positive_deltas" or "positive_counts", the former for
+ // regular histograms with integer counts, the latter for
+ // float histograms.
+ repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ repeated double positive_counts = 13; // Absolute count of each bucket.
+
+ ResetHint reset_hint = 14;
+ // timestamp represents timestamp of the sample in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ int64 timestamp = 15;
+
+ // custom_values is an additional field used by non-exponential bucketing layouts.
+ //
+ // For custom buckets (-53 schema value) custom_values specify monotonically
+ // increasing upper inclusive boundaries for the bucket counts with arbitrary
+ // widths for this histogram. In other words, custom_values represents custom,
+ // explicit bucketing that could have been converted from the classic histograms.
+ //
+ // Those bounds are then referenced by spans in positive_spans with corresponding positive
+ // counts or deltas (refer to positive_spans for more details). This way we can
+ // encode sparse histograms with custom bucketing (many buckets are often
+ // not used).
+ //
+ // Note that for custom bounds, even negative observations are placed in the positive
+ // counts to simplify the implementation and avoid the ambiguity of where to place
+ // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
+ // the zero bucket are unused if the schema indicates custom bucketing.
+ //
+ // For each upper boundary the previous boundary represents the lower exclusive
+ // boundary for that bucket. The first element is the upper inclusive boundary
+ // for the first bucket, which implicitly has a lower inclusive bound of -Inf.
+ // This is similar to "le" label semantics on classic histograms. You may add a
+ // bucket with an upper bound of 0 to make sure that you really have no negative
+ // observations, but in practice, native histogram rendering treats the cases
+ // with and without a first upper boundary of 0 (and no negative counts) the same.
+ //
+ // The last element is not only the upper inclusive bound of the last regular
+ // bucket, but implicitly the lower exclusive bound of the +Inf bucket.
+ repeated double custom_values = 16;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+ sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+ uint32 length = 2; // Length of consecutive buckets.
+}
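
As a reading aid for the custom_values semantics above, here is a hedged Go sketch; `expandCustomBuckets` is a hypothetical helper, not part of this change, and it assumes float counts (positive_counts) for brevity. It expands custom-bucket (schema -53) spans against custom_values into explicit "le"-style upper bounds, treating a reference one past the end of custom_values as +Inf:

```go
package main

import (
	"fmt"
	"math"

	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

// expandCustomBuckets walks spans and counts, resolving each absolute bucket
// index against customValues to an upper inclusive bound ("le"); an index
// equal to len(customValues) means the +Inf bucket.
func expandCustomBuckets(spans []writev2.BucketSpan, counts, customValues []float64) map[float64]float64 {
	out := map[float64]float64{}
	idx := int32(0) // Absolute index into customValues.
	i := 0          // Running index into counts.
	for _, s := range spans {
		idx += s.Offset
		for j := uint32(0); j < s.Length; j++ {
			le := math.Inf(1)
			if int(idx) < len(customValues) {
				le = customValues[idx]
			}
			out[le] = counts[i]
			idx++
			i++
		}
	}
	return out
}

func main() {
	// Buckets (-Inf, 0.5], (0.5, 1] and (1, +Inf) with counts 3, 4 and 2.
	fmt.Println(expandCustomBuckets(
		[]writev2.BucketSpan{{Offset: 0, Length: 3}},
		[]float64{3, 4, 2},
		[]float64{0.5, 1},
	))
}
```

This prints map[0.5:3 1:4 +Inf:2], matching the "previous boundary is the lower exclusive bound" reading described in the field comment.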
diff --git a/prompb/io/prometheus/write/v2/types_test.go b/prompb/io/prometheus/write/v2/types_test.go
new file mode 100644
index 000000000..5b7622fc2
--- /dev/null
+++ b/prompb/io/prometheus/write/v2/types_test.go
@@ -0,0 +1,97 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package writev2
+
+import (
+ "testing"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/prompb"
+)
+
+func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) {
+ expectedV1Empty := &prompb.WriteRequest{}
+ for _, tc := range []struct{ incoming *Request }{
+ {
+ incoming: &Request{}, // Technically invalid; symbols should contain at least an empty string.
+ },
+ {
+ incoming: &Request{
+ Symbols: []string{""},
+ }, // NOTE: Without reserved fields, this failed with a "corrupted" ghost TimeSeries element.
+ },
+ {
+ incoming: &Request{
+ Symbols: []string{"", "__name__", "metric1"},
+ Timeseries: []TimeSeries{
+ {LabelsRefs: []uint32{1, 2}},
+ {Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}},
+ }, // NOTE: Without reserved fields, this failed with "proto: illegal wireType 7".
+ },
+ },
+ } {
+ t.Run("", func(t *testing.T) {
+ in, err := proto.Marshal(tc.incoming)
+ require.NoError(t, err)
+
+ // Test accidental unmarshal of v2 payload with v1 proto.
+ out := &prompb.WriteRequest{}
+ require.NoError(t, proto.Unmarshal(in, out))
+
+ // Drop unknowns, we expect them when incoming payload had some fields.
+ // This field & method will be likely gone after gogo removal.
+ out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
+
+ require.Equal(t, expectedV1Empty, out)
+ })
+ }
+}
+
+func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) {
+ expectedV2Empty := &Request{}
+ for _, tc := range []struct{ incoming *prompb.WriteRequest }{
+ {
+ incoming: &prompb.WriteRequest{},
+ },
+ {
+ incoming: &prompb.WriteRequest{
+ Timeseries: []prompb.TimeSeries{
+ {
+ Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}},
+ Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}},
+ },
+ },
+ },
+ // NOTE: Without reserved fields, results in corrupted v2.Request.Symbols.
+ },
+ } {
+ t.Run("", func(t *testing.T) {
+ in, err := proto.Marshal(tc.incoming)
+ require.NoError(t, err)
+
+ // Test accidental unmarshal of v1 payload with v2 proto.
+ out := &Request{}
+ require.NoError(t, proto.Unmarshal(in, out))
+
+ // Drop unknowns, we expect them when incoming payload had some fields.
+ // This field & method will be likely gone after gogo removal.
+ out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
+
+ require.Equal(t, expectedV2Empty, out)
+ })
+ }
+}
diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go
new file mode 100644
index 000000000..08e9e62d2
--- /dev/null
+++ b/prompb/rwcommon/codec_test.go
@@ -0,0 +1,282 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rwcommon
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/stretchr/testify/require"
+
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
+ "github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+)
+
+func TestToLabels(t *testing.T) {
+ expected := labels.FromStrings("__name__", "metric1", "foo", "bar")
+
+ t.Run("v1", func(t *testing.T) {
+ ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}}
+ b := labels.NewScratchBuilder(2)
+ require.Equal(t, expected, ts.ToLabels(&b, nil))
+ require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil))
+ require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels))
+ })
+ t.Run("v2", func(t *testing.T) {
+ v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"}
+ ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}}
+ b := labels.NewScratchBuilder(2)
+ require.Equal(t, expected, ts.ToLabels(&b, v2Symbols))
+ // No need for FromLabels in our prod code as we use symbol table to do so.
+ })
+}
+
+func TestFromMetadataType(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ input model.MetricType
+ expectedV1 prompb.MetricMetadata_MetricType
+ expectedV2 writev2.Metadata_MetricType
+ }{
+ {
+ desc: "with a single-word metric",
+ input: model.MetricTypeCounter,
+ expectedV1: prompb.MetricMetadata_COUNTER,
+ expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER,
+ },
+ {
+ desc: "with a two-word metric",
+ input: model.MetricTypeStateset,
+ expectedV1: prompb.MetricMetadata_STATESET,
+ expectedV2: writev2.Metadata_METRIC_TYPE_STATESET,
+ },
+ {
+ desc: "with an unknown metric",
+ input: "not-known",
+ expectedV1: prompb.MetricMetadata_UNKNOWN,
+ expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Run("v1", func(t *testing.T) {
+ require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input))
+ })
+ t.Run("v2", func(t *testing.T) {
+ require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input))
+ })
+ })
+ }
+}
+
+func TestToMetadata(t *testing.T) {
+ sym := writev2.NewSymbolTable()
+
+ for _, tc := range []struct {
+ input writev2.Metadata
+ expected metadata.Metadata
+ }{
+ {
+ input: writev2.Metadata{},
+ expected: metadata.Metadata{
+ Type: model.MetricTypeUnknown,
+ },
+ },
+ {
+ input: writev2.Metadata{
+ Type: 12414, // Unknown.
+ },
+ expected: metadata.Metadata{
+ Type: model.MetricTypeUnknown,
+ },
+ },
+ {
+ input: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_COUNTER,
+ HelpRef: sym.Symbolize("help1"),
+ UnitRef: sym.Symbolize("unit1"),
+ },
+ expected: metadata.Metadata{
+ Type: model.MetricTypeCounter,
+ Help: "help1",
+ Unit: "unit1",
+ },
+ },
+ {
+ input: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_STATESET,
+ HelpRef: sym.Symbolize("help2"),
+ },
+ expected: metadata.Metadata{
+ Type: model.MetricTypeStateset,
+ Help: "help2",
+ },
+ },
+ } {
+ t.Run("", func(t *testing.T) {
+ ts := writev2.TimeSeries{Metadata: tc.input}
+ require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols()))
+ })
+ }
+}
+
+func TestToHistogram_Empty(t *testing.T) {
+ t.Run("v1", func(t *testing.T) {
+ require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
+ require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
+ })
+ t.Run("v2", func(t *testing.T) {
+ require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
+ require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
+ })
+}
+
+func testIntHistogram() histogram.Histogram {
+ return histogram.Histogram{
+ CounterResetHint: histogram.GaugeType,
+ Schema: 0,
+ Count: 19,
+ Sum: 2.7,
+ ZeroThreshold: 1e-128,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 4},
+ {Offset: 0, Length: 0},
+ {Offset: 0, Length: 3},
+ },
+ PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 5},
+ {Offset: 1, Length: 0},
+ {Offset: 0, Length: 1},
+ },
+ NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
+ }
+}
+
+func testFloatHistogram() histogram.FloatHistogram {
+ return histogram.FloatHistogram{
+ CounterResetHint: histogram.GaugeType,
+ Schema: 0,
+ Count: 19,
+ Sum: 2.7,
+ ZeroThreshold: 1e-128,
+ PositiveSpans: []histogram.Span{
+ {Offset: 0, Length: 4},
+ {Offset: 0, Length: 0},
+ {Offset: 0, Length: 3},
+ },
+ PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
+ NegativeSpans: []histogram.Span{
+ {Offset: 0, Length: 5},
+ {Offset: 1, Length: 0},
+ {Offset: 0, Length: 1},
+ },
+ NegativeBuckets: []float64{1, 3, 1, 2, 1, 1},
+ }
+}
+
+func TestFromIntToFloatOrIntHistogram(t *testing.T) {
+ testIntHist := testIntHistogram()
+ testFloatHist := testFloatHistogram()
+
+ t.Run("v1", func(t *testing.T) {
+ h := prompb.FromIntHistogram(123, testIntHist.Copy())
+ require.False(t, h.IsFloatHistogram())
+ require.Equal(t, int64(123), h.Timestamp)
+ require.Equal(t, testIntHist, *h.ToIntHistogram())
+ require.Equal(t, testFloatHist, *h.ToFloatHistogram())
+ })
+ t.Run("v2", func(t *testing.T) {
+ h := writev2.FromIntHistogram(123, testIntHist.Copy())
+ require.False(t, h.IsFloatHistogram())
+ require.Equal(t, int64(123), h.Timestamp)
+ require.Equal(t, testIntHist, *h.ToIntHistogram())
+ require.Equal(t, testFloatHist, *h.ToFloatHistogram())
+ })
+}
+
+func TestFromFloatToFloatHistogram(t *testing.T) {
+ testFloatHist := testFloatHistogram()
+
+ t.Run("v1", func(t *testing.T) {
+ h := prompb.FromFloatHistogram(123, testFloatHist.Copy())
+ require.True(t, h.IsFloatHistogram())
+ require.Equal(t, int64(123), h.Timestamp)
+ require.Nil(t, h.ToIntHistogram())
+ require.Equal(t, testFloatHist, *h.ToFloatHistogram())
+ })
+ t.Run("v2", func(t *testing.T) {
+ h := writev2.FromFloatHistogram(123, testFloatHist.Copy())
+ require.True(t, h.IsFloatHistogram())
+ require.Equal(t, int64(123), h.Timestamp)
+ require.Nil(t, h.ToIntHistogram())
+ require.Equal(t, testFloatHist, *h.ToFloatHistogram())
+ })
+}
+
+func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) {
+ for _, tc := range []struct {
+ input histogram.CounterResetHint
+ expectedV1 prompb.Histogram_ResetHint
+ expectedV2 writev2.Histogram_ResetHint
+ }{
+ {
+ input: histogram.UnknownCounterReset,
+ expectedV1: prompb.Histogram_UNKNOWN,
+ expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED,
+ },
+ {
+ input: histogram.CounterReset,
+ expectedV1: prompb.Histogram_YES,
+ expectedV2: writev2.Histogram_RESET_HINT_YES,
+ },
+ {
+ input: histogram.NotCounterReset,
+ expectedV1: prompb.Histogram_NO,
+ expectedV2: writev2.Histogram_RESET_HINT_NO,
+ },
+ {
+ input: histogram.GaugeType,
+ expectedV1: prompb.Histogram_GAUGE,
+ expectedV2: writev2.Histogram_RESET_HINT_GAUGE,
+ },
+ } {
+ t.Run("", func(t *testing.T) {
+ t.Run("v1", func(t *testing.T) {
+ h := testIntHistogram()
+ h.CounterResetHint = tc.input
+ got := prompb.FromIntHistogram(1337, &h)
+ require.Equal(t, tc.expectedV1, got.GetResetHint())
+
+ fh := testFloatHistogram()
+ fh.CounterResetHint = tc.input
+ got2 := prompb.FromFloatHistogram(1337, &fh)
+ require.Equal(t, tc.expectedV1, got2.GetResetHint())
+ })
+ t.Run("v2", func(t *testing.T) {
+ h := testIntHistogram()
+ h.CounterResetHint = tc.input
+ got := writev2.FromIntHistogram(1337, &h)
+ require.Equal(t, tc.expectedV2, got.GetResetHint())
+
+ fh := testFloatHistogram()
+ fh.CounterResetHint = tc.input
+ got2 := writev2.FromFloatHistogram(1337, &fh)
+ require.Equal(t, tc.expectedV2, got2.GetResetHint())
+ })
+ })
+ }
+}
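
The symbol-table round trip exercised above deserves a compact end-to-end view. A minimal sketch, assuming only helpers already used in these tests (NewSymbolTable, Symbolize, SymbolizeLabels, Symbols, ToLabels), of building a v2 request and resolving its references back:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	st := writev2.NewSymbolTable()

	// Label name/value pairs are stored once in the symbols table and
	// referenced by index pairs; index 0 is always the empty string.
	lbls := labels.FromStrings("__name__", "metric1", "job", "demo")
	refs := st.SymbolizeLabels(lbls, nil)

	req := &writev2.Request{
		Timeseries: []writev2.TimeSeries{{
			LabelsRefs: refs,
			Metadata: writev2.Metadata{
				Type:    writev2.Metadata_METRIC_TYPE_GAUGE,
				HelpRef: st.Symbolize("Example gauge."),
				// UnitRef left as 0, i.e. the empty string at symbols[0].
			},
			Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
		}},
		Symbols: st.Symbols(),
	}

	// Receivers resolve the references back through req.Symbols.
	b := labels.NewScratchBuilder(0)
	fmt.Println(req.Timeseries[0].ToLabels(&b, req.Symbols))
}
```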
diff --git a/scrape/manager.go b/scrape/manager.go
index cb92db5a8..156e949f8 100644
--- a/scrape/manager.go
+++ b/scrape/manager.go
@@ -73,9 +73,11 @@ type Options struct {
// Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool
- // Option to enable the experimental in-memory metadata storage and append
- // metadata to the WAL.
- EnableMetadataStorage bool
+ // Option to enable appending of scraped metadata to the TSDB/other appenders. Individual appenders
+ // can decide what to do with metadata, but for practical purposes this flag exists so that metadata
+ // can be written to the WAL and thus read back for remote write.
+ // TODO: implement some form of metadata storage
+ AppendMetadata bool
// Option to increase the interval used by scrape manager to throttle target groups updates.
DiscoveryReloadInterval model.Duration
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
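
For downstream users of the scrape manager, opting in is a one-field change. A minimal sketch, assuming the caller passes the Options to the scrape manager constructor as usual:

```go
package main

import "github.com/prometheus/prometheus/scrape"

func main() {
	// Enable appending scraped metadata to downstream appenders, e.g. so it
	// reaches the WAL and can be shipped by remote write.
	opts := &scrape.Options{AppendMetadata: true}
	_ = opts // Handed to the scrape manager constructor by the caller.
}
```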
diff --git a/scrape/scrape.go b/scrape/scrape.go
index a0b681444..17e9913e8 100644
--- a/scrape/scrape.go
+++ b/scrape/scrape.go
@@ -181,7 +181,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion,
options.ExtraMetrics,
- options.EnableMetadataStorage,
+ options.AppendMetadata,
opts.target,
options.PassMetadataInContext,
metrics,
diff --git a/scripts/genproto.sh b/scripts/genproto.sh
index dee51d4aa..4ee337dfa 100755
--- a/scripts/genproto.sh
+++ b/scripts/genproto.sh
@@ -10,8 +10,9 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255
fi
+# TODO(bwplotka): Move to buf, this is not OSS agnostic, likely won't work locally.
if ! [[ $(protoc --version) =~ "3.15.8" ]]; then
- echo "could not find protoc 3.15.8, is it installed + in PATH?"
+ echo "could not find protoc 3.15.8, is it installed + in PATH? Consider commenting out this check for local flow"
exit 255
fi
@@ -40,6 +41,9 @@ for dir in ${DIRS}; do
-I="${PROM_PATH}" \
-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
./*.proto
+ protoc --gogofast_out=plugins=grpc:. -I=. \
+ -I="${GOGOPROTO_PATH}" \
+ ./io/prometheus/write/v2/*.proto
protoc --gogofast_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,paths=source_relative:. -I=. \
-I="${GOGOPROTO_PATH}" \
./io/prometheus/client/*.proto
diff --git a/storage/remote/client.go b/storage/remote/client.go
index e8791b643..eff44c606 100644
--- a/storage/remote/client.go
+++ b/storage/remote/client.go
@@ -35,13 +35,40 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote/azuread"
)
const maxErrMsgLen = 1024
-var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+const (
+ RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version"
+ RemoteWriteVersion1HeaderValue = "0.1.0"
+ RemoteWriteVersion20HeaderValue = "2.0.0"
+ appProtoContentType = "application/x-protobuf"
+)
+
+// Compression represents the encoding. Currently remote storage supports only
+// one, but we are experimenting with more, hence the compression scaffolding
+// is kept for now.
+// NOTE(bwplotka): Keeping it public as a non-stable helper for importers to use.
+type Compression string
+
+const (
+ // SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt
+ SnappyBlockCompression Compression = "snappy"
+)
+
+var (
+ // UserAgent represents Prometheus version to use for user agent header.
+ UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
+
+ remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
+ config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
+ config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
+ }
+)
var (
remoteReadQueriesTotal = prometheus.NewCounterVec(
@@ -93,6 +120,9 @@ type Client struct {
readQueries prometheus.Gauge
readQueriesTotal *prometheus.CounterVec
readQueriesDuration prometheus.Observer
+
+ writeProtoMsg config.RemoteWriteProtoMsg
+ writeCompression Compression // Not exposed by ClientConfig for now.
}
// ClientConfig configures a client.
@@ -104,6 +134,7 @@ type ClientConfig struct {
AzureADConfig *azuread.AzureADConfig
Headers map[string]string
RetryOnRateLimit bool
+ WriteProtoMsg config.RemoteWriteProtoMsg
}
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@@ -162,14 +193,20 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
}
}
- httpClient.Transport = otelhttp.NewTransport(t)
+ writeProtoMsg := config.RemoteWriteProtoMsgV1
+ if conf.WriteProtoMsg != "" {
+ writeProtoMsg = conf.WriteProtoMsg
+ }
+ httpClient.Transport = otelhttp.NewTransport(t)
return &Client{
remoteName: name,
urlString: conf.URL.String(),
Client: httpClient,
retryOnRateLimit: conf.RetryOnRateLimit,
timeout: time.Duration(conf.Timeout),
+ writeProtoMsg: writeProtoMsg,
+ writeCompression: SnappyBlockCompression,
}, nil
}
@@ -206,10 +243,16 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
return err
}
- httpReq.Header.Add("Content-Encoding", "snappy")
- httpReq.Header.Set("Content-Type", "application/x-protobuf")
+ httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
+ httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg])
httpReq.Header.Set("User-Agent", UserAgent)
- httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
+ if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 {
+ // Compatibility mode for 1.0.
+ httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
+ } else {
+ httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+ }
+
if attempt > 0 {
httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
}
@@ -265,12 +308,12 @@ func retryAfterDuration(t string) model.Duration {
}
// Name uniquely identifies the client.
-func (c Client) Name() string {
+func (c *Client) Name() string {
return c.remoteName
}
// Endpoint is the remote read or write endpoint.
-func (c Client) Endpoint() string {
+func (c *Client) Endpoint() string {
return c.urlString
}
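
To summarize the header logic in Store, here is a hedged sketch of what each protocol message puts on the wire; `writeHeaders` is a hypothetical helper shown with net/http for illustration only, built from the constants introduced above:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/storage/remote"
)

// writeHeaders mirrors the headers Client.Store sets per protocol message.
func writeHeaders(h http.Header, msg config.RemoteWriteProtoMsg) {
	h.Set("Content-Encoding", string(remote.SnappyBlockCompression))
	if msg == config.RemoteWriteProtoMsgV1 {
		// Compatibility mode for the 1.x spec: plain content type, 0.1.0 version.
		h.Set("Content-Type", "application/x-protobuf")
		h.Set(remote.RemoteWriteVersionHeader, remote.RemoteWriteVersion1HeaderValue)
		return
	}
	h.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
	h.Set(remote.RemoteWriteVersionHeader, remote.RemoteWriteVersion20HeaderValue)
}

func main() {
	h := http.Header{}
	writeHeaders(h, config.RemoteWriteProtoMsgV2)
	fmt.Println(h)
}
```

The plain content type for v1 is deliberate, per the comment above, to stay compatible with the 1.x spec.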
diff --git a/storage/remote/codec.go b/storage/remote/codec.go
index 8c569ff03..c9220ca42 100644
--- a/storage/remote/codec.go
+++ b/storage/remote/codec.go
@@ -22,7 +22,6 @@ import (
"net/http"
"slices"
"sort"
- "strings"
"sync"
"github.com/gogo/protobuf/proto"
@@ -30,10 +29,10 @@ import (
"github.com/prometheus/common/model"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
- "github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
@@ -153,10 +152,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
})
case chunkenc.ValHistogram:
ts, h := iter.AtHistogram(nil)
- histograms = append(histograms, HistogramToHistogramProto(ts, h))
+ histograms = append(histograms, prompb.FromIntHistogram(ts, h))
case chunkenc.ValFloatHistogram:
ts, fh := iter.AtFloatHistogram(nil)
- histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
+ histograms = append(histograms, prompb.FromFloatHistogram(ts, fh))
default:
return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
}
@@ -166,7 +165,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
}
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
- Labels: LabelsToLabelsProto(series.Labels(), nil),
+ Labels: prompb.FromLabels(series.Labels(), nil),
Samples: samples,
Histograms: histograms,
})
@@ -182,7 +181,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
if err := validateLabelsAndMetricName(ts.Labels); err != nil {
return errSeriesSet{err: err}
}
- lbls := LabelProtosToLabels(&b, ts.Labels)
+ lbls := ts.ToLabels(&b, nil)
series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
}
@@ -235,7 +234,7 @@ func StreamChunkedReadResponses(
for ss.Next() {
series := ss.At()
iter = series.Iterator(iter)
- lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
+ lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels)
maxDataLength := maxBytesInFrame
for _, lbl := range lbls {
@@ -481,21 +480,16 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
panic("iterator is not on an integer histogram sample")
}
h := c.series.histograms[c.histogramsCur]
- return h.Timestamp, HistogramProtoToHistogram(h)
+ return h.Timestamp, h.ToIntHistogram()
}
// AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
- switch c.curValType {
- case chunkenc.ValHistogram:
- fh := c.series.histograms[c.histogramsCur]
- return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
- case chunkenc.ValFloatHistogram:
+ if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
fh := c.series.histograms[c.histogramsCur]
- return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
- default:
- panic("iterator is not on a histogram sample")
+ return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
}
+ panic("iterator is not on a histogram sample")
}
// AtT implements chunkenc.Iterator.
@@ -618,141 +612,6 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
return result, nil
}
-func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemplar.Exemplar {
- timestamp := ep.Timestamp
-
- return exemplar.Exemplar{
- Labels: LabelProtosToLabels(b, ep.Labels),
- Value: ep.Value,
- Ts: timestamp,
- HasTs: timestamp != 0,
- }
-}
-
-// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
-// provided proto message. The caller has to make sure that the proto message
-// represents an integer histogram and not a float histogram, or it panics.
-func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
- if hp.IsFloatHistogram() {
- panic("HistogramProtoToHistogram called with a float histogram")
- }
- return &histogram.Histogram{
- CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
- Schema: hp.Schema,
- ZeroThreshold: hp.ZeroThreshold,
- ZeroCount: hp.GetZeroCountInt(),
- Count: hp.GetCountInt(),
- Sum: hp.Sum,
- PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
- PositiveBuckets: hp.GetPositiveDeltas(),
- NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
- NegativeBuckets: hp.GetNegativeDeltas(),
- }
-}
-
-// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
-// provided proto message to a Float Histogram. The caller has to make sure that
-// the proto message represents a float histogram and not an integer histogram,
-// or it panics.
-func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
- if !hp.IsFloatHistogram() {
- panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
- }
- return &histogram.FloatHistogram{
- CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
- Schema: hp.Schema,
- ZeroThreshold: hp.ZeroThreshold,
- ZeroCount: hp.GetZeroCountFloat(),
- Count: hp.GetCountFloat(),
- Sum: hp.Sum,
- PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
- PositiveBuckets: hp.GetPositiveCounts(),
- NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
- NegativeBuckets: hp.GetNegativeCounts(),
- }
-}
-
-// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message
-// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a
-// float histogram, or it panics.
-func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
- if hp.IsFloatHistogram() {
- panic("HistogramProtoToFloatHistogram called with a float histogram")
- }
- return &histogram.FloatHistogram{
- CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
- Schema: hp.Schema,
- ZeroThreshold: hp.ZeroThreshold,
- ZeroCount: float64(hp.GetZeroCountInt()),
- Count: float64(hp.GetCountInt()),
- Sum: hp.Sum,
- PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
- PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()),
- NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
- NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()),
- }
-}
-
-func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
- spans := make([]histogram.Span, len(s))
- for i := 0; i < len(s); i++ {
- spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
- }
-
- return spans
-}
-
-func deltasToCounts(deltas []int64) []float64 {
- counts := make([]float64, len(deltas))
- var cur float64
- for i, d := range deltas {
- cur += float64(d)
- counts[i] = cur
- }
- return counts
-}
-
-func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
- return prompb.Histogram{
- Count: &prompb.Histogram_CountInt{CountInt: h.Count},
- Sum: h.Sum,
- Schema: h.Schema,
- ZeroThreshold: h.ZeroThreshold,
- ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
- NegativeSpans: spansToSpansProto(h.NegativeSpans),
- NegativeDeltas: h.NegativeBuckets,
- PositiveSpans: spansToSpansProto(h.PositiveSpans),
- PositiveDeltas: h.PositiveBuckets,
- ResetHint: prompb.Histogram_ResetHint(h.CounterResetHint),
- Timestamp: timestamp,
- }
-}
-
-func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
- return prompb.Histogram{
- Count: &prompb.Histogram_CountFloat{CountFloat: fh.Count},
- Sum: fh.Sum,
- Schema: fh.Schema,
- ZeroThreshold: fh.ZeroThreshold,
- ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
- NegativeSpans: spansToSpansProto(fh.NegativeSpans),
- NegativeCounts: fh.NegativeBuckets,
- PositiveSpans: spansToSpansProto(fh.PositiveSpans),
- PositiveCounts: fh.PositiveBuckets,
- ResetHint: prompb.Histogram_ResetHint(fh.CounterResetHint),
- Timestamp: timestamp,
- }
-}
-
-func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
- spans := make([]prompb.BucketSpan, len(s))
- for i := 0; i < len(s); i++ {
- spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
- }
-
- return spans
-}
-
// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric.
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
metric := make(model.Metric, len(labelPairs))
@@ -762,44 +621,32 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
return metric
}
-// LabelProtosToLabels transforms prompb labels into labels. The labels builder
-// will be used to build the returned labels.
-func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels {
- b.Reset()
- for _, l := range labelPairs {
- b.Add(l.Name, l.Value)
+// DecodeWriteRequest decodes an io.Reader into a prompb.WriteRequest, handling
+// snappy decompression.
+// Also used by documentation/examples/remote_storage.
+func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
+ compressed, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
}
- b.Sort()
- return b.Labels()
-}
-// LabelsToLabelsProto transforms labels into prompb labels. The buffer slice
-// will be used to avoid allocations if it is big enough to store the labels.
-func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
- result := buf[:0]
- lbls.Range(func(l labels.Label) {
- result = append(result, prompb.Label{
- Name: l.Name,
- Value: l.Value,
- })
- })
- return result
-}
+ reqBuf, err := snappy.Decode(nil, compressed)
+ if err != nil {
+ return nil, err
+ }
-// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
-func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType {
- mt := strings.ToUpper(string(t))
- v, ok := prompb.MetricMetadata_MetricType_value[mt]
- if !ok {
- return prompb.MetricMetadata_UNKNOWN
+ var req prompb.WriteRequest
+ if err := proto.Unmarshal(reqBuf, &req); err != nil {
+ return nil, err
}
- return prompb.MetricMetadata_MetricType(v)
+ return &req, nil
}
-// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
+// DecodeWriteV2Request decodes an io.Reader into a writev2.Request, handling
// snappy decompression.
-func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
+// Also used by documentation/examples/remote_storage.
+func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) {
compressed, err := io.ReadAll(r)
if err != nil {
return nil, err
@@ -810,7 +657,7 @@ func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
return nil, err
}
- var req prompb.WriteRequest
+ var req writev2.Request
if err := proto.Unmarshal(reqBuf, &req); err != nil {
return nil, err
}
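
On the receiving side, DecodeWriteV2Request pairs naturally with ToLabels. A minimal sketch, assuming an HTTP mux wired by the caller and only basic error handling (the body is expected to be snappy-compressed; decompression happens inside DecodeWriteV2Request):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage/remote"
)

func handler(w http.ResponseWriter, r *http.Request) {
	req, err := remote.DecodeWriteV2Request(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	b := labels.NewScratchBuilder(0)
	for _, ts := range req.Timeseries {
		// Label references are resolved against the request-wide symbols.
		fmt.Println(ts.ToLabels(&b, req.Symbols), len(ts.Samples), "samples")
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/api/v1/write", handler)
	// http.ListenAndServe(":9999", nil) // Left commented in this sketch.
}
```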
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go
index c3a4cbc6d..15f8fe132 100644
--- a/storage/remote/codec_test.go
+++ b/storage/remote/codec_test.go
@@ -19,13 +19,16 @@ import (
"sync"
"testing"
+ "github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
@@ -57,7 +60,7 @@ var writeRequestFixture = &prompb.WriteRequest{
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
- Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
+ Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
},
{
Labels: []prompb.Label{
@@ -69,11 +72,59 @@ var writeRequestFixture = &prompb.WriteRequest{
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
- Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat(nil))},
+ Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
},
},
}
+var (
+ writeV2RequestSeries1Metadata = metadata.Metadata{
+ Type: model.MetricTypeGauge,
+ Help: "Test gauge for test purposes",
+ Unit: "Maybe op/sec who knows (:",
+ }
+ writeV2RequestSeries2Metadata = metadata.Metadata{
+ Type: model.MetricTypeCounter,
+ Help: "Test counter for test purposes",
+ }
+
+ // writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation.
+ writeV2RequestFixture = func() *writev2.Request {
+ st := writev2.NewSymbolTable()
+ b := labels.NewScratchBuilder(0)
+ labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
+ exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
+ exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
+ return &writev2.Request{
+ Timeseries: []writev2.TimeSeries{
+ {
+ LabelsRefs: labelRefs,
+ Metadata: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2.
+ HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
+ UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
+ },
+ Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}},
+ Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
+ },
+ {
+ LabelsRefs: labelRefs,
+ Metadata: writev2.Metadata{
+ Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2.
+ HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
+ // No unit.
+ },
+ Samples: []writev2.Sample{{Value: 2, Timestamp: 1}},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}},
+ Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
+ },
+ },
+ Symbols: st.Symbols(),
+ }
+ }()
+)
+
func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct {
input []prompb.Label
@@ -268,7 +319,7 @@ func TestConcreteSeriesIterator_HistogramSamples(t *testing.T) {
} else {
ts = int64(i)
}
- histProtos[i] = HistogramToHistogramProto(ts, h)
+ histProtos[i] = prompb.FromIntHistogram(ts, h)
}
series := &concreteSeries{
labels: labels.FromStrings("foo", "bar"),
@@ -319,9 +370,9 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
histProtos := make([]prompb.Histogram, len(histograms))
for i, h := range histograms {
if i < 10 {
- histProtos[i] = HistogramToHistogramProto(int64(i+1), h)
+ histProtos[i] = prompb.FromIntHistogram(int64(i+1), h)
} else {
- histProtos[i] = HistogramToHistogramProto(int64(i+6), h)
+ histProtos[i] = prompb.FromIntHistogram(int64(i+6), h)
}
}
series := &concreteSeries{
@@ -401,7 +452,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
require.Equal(t, chunkenc.ValHistogram, it.Next())
ts, fh = it.AtFloatHistogram(nil)
require.Equal(t, int64(17), ts)
- expected := HistogramProtoToFloatHistogram(HistogramToHistogramProto(int64(17), histograms[11]))
+ expected := prompb.FromIntHistogram(int64(17), histograms[11]).ToFloatHistogram()
require.Equal(t, expected, fh)
// Keep calling Next() until the end.
@@ -485,39 +536,8 @@ func TestMergeLabels(t *testing.T) {
}
}
-func TestMetricTypeToMetricTypeProto(t *testing.T) {
- tc := []struct {
- desc string
- input model.MetricType
- expected prompb.MetricMetadata_MetricType
- }{
- {
- desc: "with a single-word metric",
- input: model.MetricTypeCounter,
- expected: prompb.MetricMetadata_COUNTER,
- },
- {
- desc: "with a two-word metric",
- input: model.MetricTypeStateset,
- expected: prompb.MetricMetadata_STATESET,
- },
- {
- desc: "with an unknown metric",
- input: "not-known",
- expected: prompb.MetricMetadata_UNKNOWN,
- },
- }
-
- for _, tt := range tc {
- t.Run(tt.desc, func(t *testing.T) {
- m := metricTypeToMetricTypeProto(tt.input)
- require.Equal(t, tt.expected, m)
- })
- }
-}
-
func TestDecodeWriteRequest(t *testing.T) {
- buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
+ buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
actual, err := DecodeWriteRequest(bytes.NewReader(buf))
@@ -525,212 +545,18 @@ func TestDecodeWriteRequest(t *testing.T) {
require.Equal(t, writeRequestFixture, actual)
}
-func TestNilHistogramProto(*testing.T) {
- // This function will panic if it impromperly handles nil
- // values, causing the test to fail.
- HistogramProtoToHistogram(prompb.Histogram{})
- HistogramProtoToFloatHistogram(prompb.Histogram{})
-}
-
-func exampleHistogram() histogram.Histogram {
- return histogram.Histogram{
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- Count: 19,
- Sum: 2.7,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 4},
- {Offset: 0, Length: 0},
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 5},
- {Offset: 1, Length: 0},
- {Offset: 0, Length: 1},
- },
- NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
- }
-}
-
-func exampleHistogramProto() prompb.Histogram {
- return prompb.Histogram{
- Count: &prompb.Histogram_CountInt{CountInt: 19},
- Sum: 2.7,
- Schema: 0,
- ZeroThreshold: 0,
- ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
- NegativeSpans: []prompb.BucketSpan{
- {
- Offset: 0,
- Length: 5,
- },
- {
- Offset: 1,
- Length: 0,
- },
- {
- Offset: 0,
- Length: 1,
- },
- },
- NegativeDeltas: []int64{1, 2, -2, 1, -1, 0},
- PositiveSpans: []prompb.BucketSpan{
- {
- Offset: 0,
- Length: 4,
- },
- {
- Offset: 0,
- Length: 0,
- },
- {
- Offset: 0,
- Length: 3,
- },
- },
- PositiveDeltas: []int64{1, 2, -2, 1, -1, 0, 0},
- ResetHint: prompb.Histogram_GAUGE,
- Timestamp: 1337,
- }
-}
-
-func TestHistogramToProtoConvert(t *testing.T) {
- tests := []struct {
- input histogram.CounterResetHint
- expected prompb.Histogram_ResetHint
- }{
- {
- input: histogram.UnknownCounterReset,
- expected: prompb.Histogram_UNKNOWN,
- },
- {
- input: histogram.CounterReset,
- expected: prompb.Histogram_YES,
- },
- {
- input: histogram.NotCounterReset,
- expected: prompb.Histogram_NO,
- },
- {
- input: histogram.GaugeType,
- expected: prompb.Histogram_GAUGE,
- },
- }
-
- for _, test := range tests {
- h := exampleHistogram()
- h.CounterResetHint = test.input
- p := exampleHistogramProto()
- p.ResetHint = test.expected
-
- require.Equal(t, p, HistogramToHistogramProto(1337, &h))
-
- require.Equal(t, h, *HistogramProtoToHistogram(p))
- }
-}
-
-func exampleFloatHistogram() histogram.FloatHistogram {
- return histogram.FloatHistogram{
- CounterResetHint: histogram.GaugeType,
- Schema: 0,
- Count: 19,
- Sum: 2.7,
- PositiveSpans: []histogram.Span{
- {Offset: 0, Length: 4},
- {Offset: 0, Length: 0},
- {Offset: 0, Length: 3},
- },
- PositiveBuckets: []float64{1, 2, -2, 1, -1, 0, 0},
- NegativeSpans: []histogram.Span{
- {Offset: 0, Length: 5},
- {Offset: 1, Length: 0},
- {Offset: 0, Length: 1},
- },
- NegativeBuckets: []float64{1, 2, -2, 1, -1, 0},
- }
-}
-
-func exampleFloatHistogramProto() prompb.Histogram {
- return prompb.Histogram{
- Count: &prompb.Histogram_CountFloat{CountFloat: 19},
- Sum: 2.7,
- Schema: 0,
- ZeroThreshold: 0,
- ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: 0},
- NegativeSpans: []prompb.BucketSpan{
- {
- Offset: 0,
- Length: 5,
- },
- {
- Offset: 1,
- Length: 0,
- },
- {
- Offset: 0,
- Length: 1,
- },
- },
- NegativeCounts: []float64{1, 2, -2, 1, -1, 0},
- PositiveSpans: []prompb.BucketSpan{
- {
- Offset: 0,
- Length: 4,
- },
- {
- Offset: 0,
- Length: 0,
- },
- {
- Offset: 0,
- Length: 3,
- },
- },
- PositiveCounts: []float64{1, 2, -2, 1, -1, 0, 0},
- ResetHint: prompb.Histogram_GAUGE,
- Timestamp: 1337,
- }
-}
-
-func TestFloatHistogramToProtoConvert(t *testing.T) {
- tests := []struct {
- input histogram.CounterResetHint
- expected prompb.Histogram_ResetHint
- }{
- {
- input: histogram.UnknownCounterReset,
- expected: prompb.Histogram_UNKNOWN,
- },
- {
- input: histogram.CounterReset,
- expected: prompb.Histogram_YES,
- },
- {
- input: histogram.NotCounterReset,
- expected: prompb.Histogram_NO,
- },
- {
- input: histogram.GaugeType,
- expected: prompb.Histogram_GAUGE,
- },
- }
-
- for _, test := range tests {
- h := exampleFloatHistogram()
- h.CounterResetHint = test.input
- p := exampleFloatHistogramProto()
- p.ResetHint = test.expected
-
- require.Equal(t, p, FloatHistogramToHistogramProto(1337, &h))
+func TestDecodeWriteV2Request(t *testing.T) {
+ buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+ require.NoError(t, err)
- require.Equal(t, h, *FloatHistogramProtoToFloatHistogram(p))
- }
+ actual, err := DecodeWriteV2Request(bytes.NewReader(buf))
+ require.NoError(t, err)
+ require.Equal(t, writeV2RequestFixture, actual)
}
func TestStreamResponse(t *testing.T) {
- lbs1 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
- lbs2 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
+ lbs1 := prompb.FromLabels(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
+ lbs2 := prompb.FromLabels(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
chunk := prompb.Chunk{
Type: prompb.Chunk_XOR,
Data: make([]byte, 100),
@@ -802,7 +628,7 @@ func (c *mockChunkSeriesSet) Next() bool {
func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
return &storage.ChunkSeriesEntry{
- Lset: LabelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels),
+ Lset: c.chunkedSeries[c.index].ToLabels(&c.builder, nil),
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
return &mockChunkIterator{
chunks: c.chunkedSeries[c.index].Chunks,
diff --git a/storage/remote/metadata_watcher.go b/storage/remote/metadata_watcher.go
index abfea3c7b..fdcd668f5 100644
--- a/storage/remote/metadata_watcher.go
+++ b/storage/remote/metadata_watcher.go
@@ -27,7 +27,7 @@ import (
// MetadataAppender is an interface used by the Metadata Watcher to send metadata, It is read from the scrape manager, on to somewhere else.
type MetadataAppender interface {
- AppendMetadata(context.Context, []scrape.MetricMetadata)
+ AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
}
// Watchable represents from where we fetch active targets for metadata.
@@ -146,7 +146,7 @@ func (mw *MetadataWatcher) collect() {
}
// Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired.
- mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata)
+ mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
}
func (mw *MetadataWatcher) ready() bool {
diff --git a/storage/remote/metadata_watcher_test.go b/storage/remote/metadata_watcher_test.go
index 0cd6027a8..ce9b9d022 100644
--- a/storage/remote/metadata_watcher_test.go
+++ b/storage/remote/metadata_watcher_test.go
@@ -57,7 +57,7 @@ type writeMetadataToMock struct {
metadataAppended int
}
-func (mwtm *writeMetadataToMock) AppendMetadata(_ context.Context, m []scrape.MetricMetadata) {
+func (mwtm *writeMetadataToMock) AppendWatcherMetadata(_ context.Context, m []scrape.MetricMetadata) {
mwtm.metadataAppended += len(m)
}
diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go
index 4cf36671a..71bba40e4 100644
--- a/storage/remote/otlptranslator/prometheus/normalize_name.go
+++ b/storage/remote/otlptranslator/prometheus/normalize_name.go
@@ -29,7 +29,6 @@ import (
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
var unitMap = map[string]string{
-
// Time
"d": "days",
"h": "hours",
@@ -111,7 +110,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
// Build a normalized name for the specified metric
func normalizeName(metric pmetric.Metric, namespace string) string {
-
// Split metric name in "tokens" (remove all non-alphanumeric)
nameTokens := strings.FieldsFunc(
metric.Name(),
diff --git a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go
index 1f8bf1a63..39a42734d 100644
--- a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go
+++ b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go
@@ -19,7 +19,6 @@ package prometheus
import "strings"
var wordToUCUM = map[string]string{
-
// Time
"days": "d",
"hours": "h",
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index dde78d35e..fb13da70d 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -36,9 +36,11 @@ import (
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
@@ -389,7 +391,7 @@ func (m *queueManagerMetrics) unregister() {
// external timeseries database.
type WriteClient interface {
// Store stores the given samples in the remote storage.
- Store(context.Context, []byte, int) error
+ Store(ctx context.Context, req []byte, retryAttempt int) error
// Name uniquely identifies the remote storage.
Name() string
// Endpoint is the remote read or write endpoint for the storage client.
@@ -418,11 +420,14 @@ type QueueManager struct {
clientMtx sync.RWMutex
storeClient WriteClient
+ protoMsg config.RemoteWriteProtoMsg
+ enc Compression
- seriesMtx sync.Mutex // Covers seriesLabels, droppedSeries and builder.
- seriesLabels map[chunks.HeadSeriesRef]labels.Labels
- droppedSeries map[chunks.HeadSeriesRef]struct{}
- builder *labels.Builder
+ seriesMtx sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder.
+ seriesLabels map[chunks.HeadSeriesRef]labels.Labels
+ seriesMetadata map[chunks.HeadSeriesRef]*metadata.Metadata
+ droppedSeries map[chunks.HeadSeriesRef]struct{}
+ builder *labels.Builder
seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first.
seriesSegmentIndexes map[chunks.HeadSeriesRef]int
@@ -463,6 +468,7 @@ func NewQueueManager(
sm ReadyScrapeManager,
enableExemplarRemoteWrite bool,
enableNativeHistogramRemoteWrite bool,
+ protoMsg config.RemoteWriteProtoMsg,
) *QueueManager {
if logger == nil {
logger = log.NewNopLogger()
@@ -487,6 +493,7 @@ func NewQueueManager(
sendNativeHistograms: enableNativeHistogramRemoteWrite,
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
+ seriesMetadata: make(map[chunks.HeadSeriesRef]*metadata.Metadata),
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
droppedSeries: make(map[chunks.HeadSeriesRef]struct{}),
builder: labels.NewBuilder(labels.EmptyLabels()),
@@ -503,9 +510,26 @@ func NewQueueManager(
metrics: metrics,
interner: interner,
highestRecvTimestamp: highestRecvTimestamp,
+
+ protoMsg: protoMsg,
+ enc: SnappyBlockCompression, // Hardcoded for now, but scaffolding exists for likely future use.
+ }
+
+ walMetadata := false
+ if t.protoMsg != config.RemoteWriteProtoMsgV1 {
+ walMetadata = true
+ }
+ t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
+
+ // The current MetadataWatcher implementation is mutually exclusive
+ // with the new approach, which stores metadata as WAL records and
+ // ships them alongside series. If both mechanisms are set, the new one
+ // takes precedence by implicitly disabling the older one.
+ if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 {
+ level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request")
+ t.mcfg.Send = false
}
- t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite)
if t.mcfg.Send {
t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
}
@@ -514,14 +538,21 @@ func NewQueueManager(
return t
}
-// AppendMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
-func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
+// AppendWatcherMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
+// This is only used for the metadata_config.send setting and 1.x Remote Write.
+func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
+ // No-op for any newer proto format, which will cache metadata sent to it from the WAL watcher.
+ if t.protoMsg != config.RemoteWriteProtoMsgV1 {
+ return
+ }
+
+ // 1.x will still get metadata in batches.
mm := make([]prompb.MetricMetadata, 0, len(metadata))
for _, entry := range metadata {
mm = append(mm, prompb.MetricMetadata{
MetricFamilyName: entry.Metric,
Help: entry.Help,
- Type: metricTypeToMetricTypeProto(entry.Type),
+ Type: prompb.FromMetadataType(entry.Type),
Unit: entry.Unit,
})
}
@@ -542,8 +573,8 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met
}
func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error {
- // Build the WriteRequest with no samples.
- req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil)
+ // Build the WriteRequest with no samples (v1 flow).
+ req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.enc)
if err != nil {
return err
}
@@ -629,6 +660,36 @@ func isTimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sam
}
}
+func isV2TimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts writev2.TimeSeries) bool {
+ return func(ts writev2.TimeSeries) bool {
+ if sampleAgeLimit == 0 {
+ // If sampleAgeLimit is unset, then we never skip samples due to their age.
+ return false
+ }
+ switch {
+ // Only the first element should be set in the series, therefore we only check the first element.
+ case len(ts.Samples) > 0:
+ if isSampleOld(baseTime, sampleAgeLimit, ts.Samples[0].Timestamp) {
+ metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc()
+ return true
+ }
+ case len(ts.Histograms) > 0:
+ if isSampleOld(baseTime, sampleAgeLimit, ts.Histograms[0].Timestamp) {
+ metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc()
+ return true
+ }
+ case len(ts.Exemplars) > 0:
+ if isSampleOld(baseTime, sampleAgeLimit, ts.Exemplars[0].Timestamp) {
+ metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc()
+ return true
+ }
+ default:
+ return false
+ }
+ return false
+ }
+}
+
// Append queues a sample to be sent to the remote storage. Blocks until all samples are
// enqueued on their shards or a shutdown signal is received.
func (t *QueueManager) Append(samples []record.RefSample) bool {
@@ -652,6 +713,9 @@ outer:
t.seriesMtx.Unlock()
continue
}
+ // TODO(cstyan): Handle or at least log an error if no metadata is found.
+ // See https://github.com/prometheus/prometheus/issues/14405
+ meta := t.seriesMetadata[s.Ref]
t.seriesMtx.Unlock()
// Start with a very small backoff. This should not be t.cfg.MinBackoff
// as it can happen without errors, and we want to pickup work after
@@ -666,6 +730,7 @@ outer:
}
if t.shards.enqueue(s.Ref, timeSeries{
seriesLabels: lbls,
+ metadata: meta,
timestamp: s.T,
value: s.V,
sType: tSample,
@@ -711,6 +776,7 @@ outer:
t.seriesMtx.Unlock()
continue
}
+ meta := t.seriesMetadata[e.Ref]
t.seriesMtx.Unlock()
// This will only loop if the queues are being resharded.
backoff := t.cfg.MinBackoff
@@ -722,6 +788,7 @@ outer:
}
if t.shards.enqueue(e.Ref, timeSeries{
seriesLabels: lbls,
+ metadata: meta,
timestamp: e.T,
value: e.V,
exemplarLabels: e.Labels,
@@ -765,6 +832,7 @@ outer:
t.seriesMtx.Unlock()
continue
}
+ meta := t.seriesMetadata[h.Ref]
t.seriesMtx.Unlock()
backoff := model.Duration(5 * time.Millisecond)
@@ -776,6 +844,7 @@ outer:
}
if t.shards.enqueue(h.Ref, timeSeries{
seriesLabels: lbls,
+ metadata: meta,
timestamp: h.T,
histogram: h.H,
sType: tHistogram,
@@ -818,6 +887,7 @@ outer:
t.seriesMtx.Unlock()
continue
}
+ meta := t.seriesMetadata[h.Ref]
t.seriesMtx.Unlock()
backoff := model.Duration(5 * time.Millisecond)
@@ -829,6 +899,7 @@ outer:
}
if t.shards.enqueue(h.Ref, timeSeries{
seriesLabels: lbls,
+ metadata: meta,
timestamp: h.T,
floatHistogram: h.FH,
sType: tFloatHistogram,
@@ -925,6 +996,23 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
}
}
+// StoreMetadata keeps track of known series' metadata for lookups when sending samples to remote.
+func (t *QueueManager) StoreMetadata(meta []record.RefMetadata) {
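+	// Only v2 sends metadata inline with samples; v1 senders keep using the
+	// separate metadata flow, so there is nothing to track here.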
+ if t.protoMsg == config.RemoteWriteProtoMsgV1 {
+ return
+ }
+
+ t.seriesMtx.Lock()
+ defer t.seriesMtx.Unlock()
+ for _, m := range meta {
+ t.seriesMetadata[m.Ref] = &metadata.Metadata{
+ Type: record.ToMetricType(m.Type),
+ Unit: m.Unit,
+ Help: m.Help,
+ }
+ }
+}
+
// UpdateSeriesSegment updates the segment number held against the series,
// so we can trim older ones in SeriesReset.
func (t *QueueManager) UpdateSeriesSegment(series []record.RefSeries, index int) {
@@ -950,6 +1038,7 @@ func (t *QueueManager) SeriesReset(index int) {
delete(t.seriesSegmentIndexes, k)
t.releaseLabels(t.seriesLabels[k])
delete(t.seriesLabels, k)
+ delete(t.seriesMetadata, k)
delete(t.droppedSeries, k)
}
}
@@ -1165,6 +1254,7 @@ type shards struct {
samplesDroppedOnHardShutdown atomic.Uint32
exemplarsDroppedOnHardShutdown atomic.Uint32
histogramsDroppedOnHardShutdown atomic.Uint32
+ metadataDroppedOnHardShutdown atomic.Uint32
}
// start the shards; must be called before any call to enqueue.
@@ -1193,6 +1283,7 @@ func (s *shards) start(n int) {
s.samplesDroppedOnHardShutdown.Store(0)
s.exemplarsDroppedOnHardShutdown.Store(0)
s.histogramsDroppedOnHardShutdown.Store(0)
+ s.metadataDroppedOnHardShutdown.Store(0)
for i := 0; i < n; i++ {
go s.runShard(hardShutdownCtx, i, newQueues[i])
}
@@ -1245,7 +1336,6 @@ func (s *shards) stop() {
func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
s.mtx.RLock()
defer s.mtx.RUnlock()
-
shard := uint64(ref) % uint64(len(s.queues))
select {
case <-s.softShutdown:
@@ -1288,6 +1378,7 @@ type timeSeries struct {
value float64
histogram *histogram.Histogram
floatHistogram *histogram.FloatHistogram
+ metadata *metadata.Metadata
timestamp int64
exemplarLabels labels.Labels
// The type of series: sample, exemplar, or histogram.
@@ -1301,6 +1392,7 @@ const (
tExemplar
tHistogram
tFloatHistogram
+ tMetadata
)
func newQueue(batchSize, capacity int) *queue {
@@ -1324,6 +1416,10 @@ func newQueue(batchSize, capacity int) *queue {
func (q *queue) Append(datum timeSeries) bool {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
+	// TODO(cstyan): Check whether including metadata reduces the total number of
+	// samples we can batch together here, and if so find a way to exclude
+	// metadata from the batch size calculation.
+ // See https://github.com/prometheus/prometheus/issues/14405
q.batch = append(q.batch, datum)
if len(q.batch) == cap(q.batch) {
select {
@@ -1347,7 +1443,6 @@ func (q *queue) Chan() <-chan []timeSeries {
func (q *queue) Batch() []timeSeries {
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
-
select {
case batch := <-q.batchQueue:
return batch
@@ -1419,19 +1514,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}()
shardNum := strconv.Itoa(shardID)
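+	// One symbol table per shard; for v2 it is filled while populating a batch
+	// and reset after every send.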
+ symbolTable := writev2.NewSymbolTable()
// Send batches of at most MaxSamplesPerSend samples to the remote storage.
// If we have fewer samples than that, flush them out after a deadline anyways.
var (
max = s.qm.cfg.MaxSamplesPerSend
- pBuf = proto.NewBuffer(nil)
- buf []byte
+ pBuf = proto.NewBuffer(nil)
+ pBufRaw []byte
+ buf []byte
)
+	// TODO(@tpaschalis): Should we also raise the max if we have WAL metadata?
if s.qm.sendExemplars {
max += int(float64(max) * 0.1)
}
+	// TODO: DRY all of this; we should make an interface/generic for the timeseries type.
batchQueue := queue.Chan()
pendingData := make([]prompb.TimeSeries, max)
for i := range pendingData {
@@ -1440,6 +1539,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
pendingData[i].Exemplars = []prompb.Exemplar{{}}
}
}
+ pendingDataV2 := make([]writev2.TimeSeries, max)
+ for i := range pendingDataV2 {
+ pendingDataV2[i].Samples = []writev2.Sample{{}}
+ }
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
stop := func() {
@@ -1452,6 +1555,24 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}
defer stop()
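+	// sendBatch marshals and sends a single batch using the configured protocol
+	// message. The v1 path reuses pendingData; the v2 path re-populates
+	// pendingDataV2 together with the shard's symbol table, which is reset
+	// after each send.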
+ sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, enc Compression, timer bool) {
+ switch protoMsg {
+ case config.RemoteWriteProtoMsgV1:
+ nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ if timer {
+ level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
+ "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
+ }
+ _ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc)
+ case config.RemoteWriteProtoMsgV2:
+ nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms)
+ n := nPendingSamples + nPendingExemplars + nPendingHistograms
+ _ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf, enc)
+ symbolTable.Reset()
+ }
+ }
+
for {
select {
case <-ctx.Done():
@@ -1475,10 +1596,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
if !ok {
return
}
- nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
+
+ sendBatch(batch, s.qm.protoMsg, s.qm.enc, false)
+			// TODO(bwplotka): Previously the return was between populate and send.
+ // Consider this when DRY-ing https://github.com/prometheus/prometheus/issues/14409
queue.ReturnForReuse(batch)
- n := nPendingSamples + nPendingExemplars + nPendingHistograms
- s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
stop()
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -1486,11 +1608,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
case <-timer.C:
batch := queue.Batch()
if len(batch) > 0 {
- nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
- n := nPendingSamples + nPendingExemplars + nPendingHistograms
- level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
- "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
- s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
+ sendBatch(batch, s.qm.protoMsg, s.qm.enc, true)
}
queue.ReturnForReuse(batch)
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
@@ -1498,21 +1616,22 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
}
}
-func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) {
+func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int) {
var nPendingSamples, nPendingExemplars, nPendingHistograms int
for nPending, d := range batch {
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
- if s.qm.sendExemplars {
+ if sendExemplars {
pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
}
- if s.qm.sendNativeHistograms {
+ if sendNativeHistograms {
pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
}
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
// stop reading from the queue. This makes it safe to reference pendingSamples by index.
- pendingData[nPending].Labels = LabelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
+ pendingData[nPending].Labels = prompb.FromLabels(d.seriesLabels, pendingData[nPending].Labels)
+
switch d.sType {
case tSample:
pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
@@ -1522,25 +1641,39 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
nPendingSamples++
case tExemplar:
pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
- Labels: LabelsToLabelsProto(d.exemplarLabels, nil),
+ Labels: prompb.FromLabels(d.exemplarLabels, nil),
Value: d.value,
Timestamp: d.timestamp,
})
nPendingExemplars++
case tHistogram:
- pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
+ pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromIntHistogram(d.timestamp, d.histogram))
nPendingHistograms++
case tFloatHistogram:
- pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
+ pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromFloatHistogram(d.timestamp, d.floatHistogram))
nPendingHistograms++
}
}
return nPendingSamples, nPendingExemplars, nPendingHistograms
}
-func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
+func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
+ begin := time.Now()
+ err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
+ s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin))
+ return err
+}
+
+// TODO(bwplotka): DRY this (share one code path for both v1 and v2).
+// See https://github.com/prometheus/prometheus/issues/14409
+func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
begin := time.Now()
- err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
+ err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
+ s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin))
+ return err
+}
+
+func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) {
if err != nil {
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err)
s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
@@ -1550,8 +1683,8 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
// These counters are used to calculate the dynamic sharding, and as such
// should be maintained irrespective of success or failure.
- s.qm.dataOut.incr(int64(len(samples)))
- s.qm.dataOutDuration.incr(int64(time.Since(begin)))
+ s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount))
+ s.qm.dataOutDuration.incr(int64(duration))
s.qm.lastSendTimestamp.Store(time.Now().Unix())
// Pending samples/exemplars/histograms also should be subtracted, as an error means
// they will not be retried.
@@ -1564,9 +1697,9 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
}
// sendSamples to the remote storage with backoff for recoverable errors.
-func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error {
+func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
// Build the WriteRequest with no metadata.
- req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, *buf, nil)
+ req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil {
// Failing to build the write request is non-recoverable, since it will
@@ -1590,8 +1723,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
samples,
nil,
pBuf,
- *buf,
+ buf,
isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
+ enc,
)
s.qm.buildRequestLimitTimestamp.Store(lowest)
if err != nil {
@@ -1622,6 +1756,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
s.qm.metrics.samplesTotal.Add(float64(sampleCount))
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
+ s.qm.metrics.metadataTotal.Add(float64(metadataCount))
err := s.qm.client().Store(ctx, *buf, try)
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
@@ -1652,6 +1787,148 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
return err
}
+// sendV2Samples to the remote storage with backoff for recoverable errors.
+func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
+	// Build the v2 WriteRequest; in v2, metadata is carried inline on each series.
+ req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)
+ s.qm.buildRequestLimitTimestamp.Store(lowest)
+ if err != nil {
+ // Failing to build the write request is non-recoverable, since it will
+ // only error if marshaling the proto to bytes fails.
+ return err
+ }
+
+ reqSize := len(req)
+ *buf = req
+
+ // An anonymous function allows us to defer the completion of our per-try spans
+ // without causing a memory leak, and it has the nice effect of not propagating any
+	// parameters for sendV2SamplesWithBackoff.
+ attemptStore := func(try int) error {
+ currentTime := time.Now()
+ lowest := s.qm.buildRequestLimitTimestamp.Load()
+ if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) {
+ // This will filter out old samples during retries.
+ req, _, lowest, err := buildV2WriteRequest(
+ s.qm.logger,
+ samples,
+ labels,
+ pBuf,
+ buf,
+ isV2TimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
+ enc,
+ )
+ s.qm.buildRequestLimitTimestamp.Store(lowest)
+ if err != nil {
+ return err
+ }
+ *buf = req
+ }
+
+ ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
+ defer span.End()
+
+ span.SetAttributes(
+ attribute.Int("request_size", reqSize),
+ attribute.Int("samples", sampleCount),
+ attribute.Int("try", try),
+ attribute.String("remote_name", s.qm.storeClient.Name()),
+ attribute.String("remote_url", s.qm.storeClient.Endpoint()),
+ )
+
+ if exemplarCount > 0 {
+ span.SetAttributes(attribute.Int("exemplars", exemplarCount))
+ }
+ if histogramCount > 0 {
+ span.SetAttributes(attribute.Int("histograms", histogramCount))
+ }
+
+ begin := time.Now()
+ s.qm.metrics.samplesTotal.Add(float64(sampleCount))
+ s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
+ s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
+ s.qm.metrics.metadataTotal.Add(float64(metadataCount))
+ err := s.qm.client().Store(ctx, *buf, try)
+ s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
+
+ if err != nil {
+ span.RecordError(err)
+ return err
+ }
+
+ return nil
+ }
+
+ onRetry := func() {
+ s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
+ s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
+ s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount))
+ }
+
+ err = s.qm.sendWriteRequestWithBackoff(ctx, attemptStore, onRetry)
+ if errors.Is(err, context.Canceled) {
+ // When there is resharding, we cancel the context for this queue, which means the data is not sent.
+ // So we exit early to not update the metrics.
+ return err
+ }
+
+ s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
+ s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
+
+ return err
+}
+
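+// populateV2TimeSeries fills pendingData from the batch, interning label,
+// help, and unit strings through symbolTable, and returns the number of
+// pending samples, exemplars, histograms, and metadata entries.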
+func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {
+ var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int
+ for nPending, d := range batch {
+ pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
+		// TODO: Should we also safeguard against empty metadata here?
+ if d.metadata != nil {
+ pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type)
+ pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
+			pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit)
+ nPendingMetadata++
+ }
+
+ if sendExemplars {
+ pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
+ }
+ if sendNativeHistograms {
+ pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
+ }
+
+ // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
+ // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
+ // stop reading from the queue. This makes it safe to reference pendingSamples by index.
+ pendingData[nPending].LabelsRefs = symbolTable.SymbolizeLabels(d.seriesLabels, pendingData[nPending].LabelsRefs)
+ switch d.sType {
+ case tSample:
+ pendingData[nPending].Samples = append(pendingData[nPending].Samples, writev2.Sample{
+ Value: d.value,
+ Timestamp: d.timestamp,
+ })
+ nPendingSamples++
+ case tExemplar:
+ pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, writev2.Exemplar{
+ LabelsRefs: symbolTable.SymbolizeLabels(d.exemplarLabels, nil), // TODO: optimize, reuse slice
+ Value: d.value,
+ Timestamp: d.timestamp,
+ })
+ nPendingExemplars++
+ case tHistogram:
+ pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromIntHistogram(d.timestamp, d.histogram))
+ nPendingHistograms++
+ case tFloatHistogram:
+ pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromFloatHistogram(d.timestamp, d.floatHistogram))
+ nPendingHistograms++
+ case tMetadata:
+			// TODO: Log or return an error?
+			// We should not receive metadata-typed entries here; metadata should already be attached to the timeSeries.
+ }
+ }
+ return nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata
+}
+
func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt func(int) error, onRetry func()) error {
backoff := t.cfg.MinBackoff
sleepDuration := model.Duration(0)
@@ -1795,7 +2072,21 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
}
-func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte, filter func(prompb.TimeSeries) bool) ([]byte, int64, int64, error) {
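+// compressPayload compresses inp with the given scheme, reusing *tmpbuf as the
+// destination when it is large enough and growing it for the next call when it
+// is smaller than snappy's worst-case encoded length.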
+func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed []byte, _ error) {
+ switch enc {
+ case SnappyBlockCompression:
+ compressed = snappy.Encode(*tmpbuf, inp)
+ if n := snappy.MaxEncodedLen(len(inp)); n > len(*tmpbuf) {
+			// Grow the buffer for the next call.
+ *tmpbuf = make([]byte, n)
+ }
+ return compressed, nil
+ default:
+ return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc)
+ }
+}
+
+func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
highest, lowest, timeSeries,
droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter)
@@ -1821,8 +2112,105 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada
// snappy uses len() to see if it needs to allocate a new slice. Make the
// buffer as long as possible.
if buf != nil {
- buf = buf[0:cap(buf)]
+ *buf = (*buf)[0:cap(*buf)]
+ } else {
+ buf = &[]byte{}
+ }
+
+ compressed, err = compressPayload(buf, pBuf.Bytes(), enc)
+ if err != nil {
+ return nil, highest, lowest, err
+ }
+ return compressed, highest, lowest, nil
+}
+
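+// buildV2WriteRequest marshals the samples and symbol table into a compressed
+// Remote Write 2.0 request, dropping too-old series when a filter is given.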
+func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
+ highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter)
+
+ if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 {
+ level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms)
+ }
+
+ req := &writev2.Request{
+ Symbols: labels,
+ Timeseries: timeSeries,
+ }
+
+ if pBuf == nil {
+ pBuf = &[]byte{} // For convenience in tests. Not efficient.
+ }
+
+ data, err := req.OptimizedMarshal(*pBuf)
+ if err != nil {
+ return nil, highest, lowest, err
+ }
+ *pBuf = data
+
+ // snappy uses len() to see if it needs to allocate a new slice. Make the
+ // buffer as long as possible.
+ if buf != nil {
+ *buf = (*buf)[0:cap(*buf)]
+ } else {
+ buf = &[]byte{}
+ }
+
+ compressed, err = compressPayload(buf, data, enc)
+ if err != nil {
+ return nil, highest, lowest, err
}
- compressed := snappy.Encode(buf, pBuf.Bytes())
return compressed, highest, lowest, nil
}
+
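+// buildV2TimeSeries is the v2 counterpart of buildTimeSeries: it drops series
+// matched by filter, compacting the slice in place via swaps so entries can be
+// safely reused, and returns the highest and lowest timestamps seen plus the
+// per-type drop counts.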
+func buildV2TimeSeries(timeSeries []writev2.TimeSeries, filter func(writev2.TimeSeries) bool) (int64, int64, []writev2.TimeSeries, int, int, int) {
+ var highest int64
+ var lowest int64
+ var droppedSamples, droppedExemplars, droppedHistograms int
+
+ keepIdx := 0
+ lowest = math.MaxInt64
+ for i, ts := range timeSeries {
+ if filter != nil && filter(ts) {
+ if len(ts.Samples) > 0 {
+ droppedSamples++
+ }
+ if len(ts.Exemplars) > 0 {
+ droppedExemplars++
+ }
+ if len(ts.Histograms) > 0 {
+ droppedHistograms++
+ }
+ continue
+ }
+
+ // At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
+ if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
+ highest = ts.Samples[0].Timestamp
+ }
+ if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
+ highest = ts.Exemplars[0].Timestamp
+ }
+ if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
+ highest = ts.Histograms[0].Timestamp
+ }
+
+ // Get the lowest timestamp.
+ if len(ts.Samples) > 0 && ts.Samples[0].Timestamp < lowest {
+ lowest = ts.Samples[0].Timestamp
+ }
+ if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp < lowest {
+ lowest = ts.Exemplars[0].Timestamp
+ }
+ if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
+ lowest = ts.Histograms[0].Timestamp
+ }
+ if i != keepIdx {
+ // We have to swap the kept timeseries with the one which should be dropped.
+			// Copying any elements within timeSeries could cause data corruption when reusing the slice in the next batch (shards.populateTimeSeries).
+ timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
+ }
+ keepIdx++
+ }
+
+ timeSeries = timeSeries[:keepIdx]
+ return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
+}
diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go
index 4d299994b..9ab563eda 100644
--- a/storage/remote/queue_manager_test.go
+++ b/storage/remote/queue_manager_test.go
@@ -15,6 +15,7 @@ package remote
import (
"context"
+ "errors"
"fmt"
"math"
"math/rand"
@@ -43,9 +44,11 @@ import (
"github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
+ "github.com/prometheus/prometheus/util/runutil"
"github.com/prometheus/prometheus/util/testutil"
)
@@ -62,30 +65,150 @@ func newHighestTimestampMetric() *maxTimestamp {
}
}
-func TestSampleDelivery(t *testing.T) {
- testcases := []struct {
- name string
- samples bool
- exemplars bool
- histograms bool
- floatHistograms bool
+func TestBasicContentNegotiation(t *testing.T) {
+ queueConfig := config.DefaultQueueConfig
+ queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
+ queueConfig.MaxShards = 1
+
+	// We need to set URLs so that metric creation doesn't panic.
+ writeConfig := baseRemoteWriteConfig("http://test-storage.com")
+ writeConfig.QueueConfig = queueConfig
+
+ conf := &config.Config{
+ GlobalConfig: config.DefaultGlobalConfig,
+ RemoteWriteConfigs: []*config.RemoteWriteConfig{
+ writeConfig,
+ },
+ }
+
+ for _, tc := range []struct {
+ name string
+ senderProtoMsg config.RemoteWriteProtoMsg
+ receiverProtoMsg config.RemoteWriteProtoMsg
+ injectErrs []error
+ expectFail bool
}{
- {samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
- {samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
- {samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
- {samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
- {samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
+ {
+ name: "v2 happy path",
+ senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV2,
+ injectErrs: []error{nil},
+ },
+ {
+ name: "v1 happy path",
+ senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV1,
+ injectErrs: []error{nil},
+ },
+ // Test a case where the v1 request has a temporary delay but goes through on retry.
+ {
+ name: "v1 happy path with one 5xx retry",
+ senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV1,
+ injectErrs: []error{RecoverableError{errors.New("pretend 500"), 1}, nil},
+ },
+ // Repeat the above test but with v2. The request has a temporary delay but goes through on retry.
+ {
+ name: "v2 happy path with one 5xx retry",
+ senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV2,
+ injectErrs: []error{RecoverableError{errors.New("pretend 500"), 1}, nil},
+ },
+ // A few error cases of v2 talking to v1.
+ {
+ name: "v2 talks to v1 that gives 400 or 415",
+ senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV1,
+ injectErrs: []error{errors.New("pretend unrecoverable err")},
+ expectFail: true,
+ },
+ {
+ name: "v2 talks to v1 that tries to unmarshal v2 payload with v1 proto",
+ senderProtoMsg: config.RemoteWriteProtoMsgV2, receiverProtoMsg: config.RemoteWriteProtoMsgV1,
+ injectErrs: []error{nil},
+ expectFail: true, // invalid request, no timeseries
+ },
+ // Opposite, v1 talking to v2 only server.
+ {
+ name: "v1 talks to v2 that gives 400 or 415",
+ senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV2,
+ injectErrs: []error{errors.New("pretend unrecoverable err")},
+ expectFail: true,
+ },
+ {
+ name: "v1 talks to (broken) v2 that tries to unmarshal v1 payload with v2 proto",
+ senderProtoMsg: config.RemoteWriteProtoMsgV1, receiverProtoMsg: config.RemoteWriteProtoMsgV2,
+ injectErrs: []error{nil},
+ expectFail: true, // invalid request, no timeseries
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ dir := t.TempDir()
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true)
+ defer s.Close()
+
+ var (
+ series []record.RefSeries
+ metadata []record.RefMetadata
+ samples []record.RefSample
+ )
+
+			// Generate the same series in both cases.
+ samples, series = createTimeseries(1, 1)
+ metadata = createSeriesMetadata(series)
+
+ // Apply new config.
+ queueConfig.Capacity = len(samples)
+ queueConfig.MaxSamplesPerSend = len(samples)
+ // For now we only ever have a single rw config in this test.
+ conf.RemoteWriteConfigs[0].ProtobufMessage = tc.senderProtoMsg
+ require.NoError(t, s.ApplyConfig(conf))
+ hash, err := toHash(writeConfig)
+ require.NoError(t, err)
+ qm := s.rws.queues[hash]
+
+ c := NewTestWriteClient(tc.receiverProtoMsg)
+ c.injectErrors(tc.injectErrs)
+ qm.SetClient(c)
+
+ qm.StoreSeries(series, 0)
+ qm.StoreMetadata(metadata)
+
+ // Do we expect some data back?
+ if !tc.expectFail {
+ c.expectSamples(samples, series)
+ } else {
+ c.expectSamples(nil, nil)
+ }
+
+ // Schedule send.
+ qm.Append(samples)
+
+ if !tc.expectFail {
+ // No error expected, so wait for data.
+ c.waitForExpectedData(t, 5*time.Second)
+ require.Equal(t, 1, c.writesReceived)
+ require.Equal(t, 0.0, client_testutil.ToFloat64(qm.metrics.failedSamplesTotal))
+ } else {
+ // Wait for failure to be recorded in metrics.
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ require.NoError(t, runutil.Retry(500*time.Millisecond, ctx.Done(), func() error {
+ if client_testutil.ToFloat64(qm.metrics.failedSamplesTotal) != 1.0 {
+ return errors.New("expected one sample failed in qm metrics")
+ }
+ return nil
+ }))
+ require.Equal(t, 0, c.writesReceived)
+ }
+
+			// samplesTotal counts attempts, so retries are included.
+ require.Equal(t, float64(len(tc.injectErrs)), client_testutil.ToFloat64(qm.metrics.samplesTotal))
+ require.Equal(t, float64(len(tc.injectErrs)-1), client_testutil.ToFloat64(qm.metrics.retriedSamplesTotal))
+ })
}
+}
- // Let's create an even number of send batches so we don't run into the
+func TestSampleDelivery(t *testing.T) {
+ // Let's create an even number of send batches, so we don't run into the
// batch timeout case.
n := 3
- dir := t.TempDir()
-
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
- defer s.Close()
-
queueConfig := config.DefaultQueueConfig
queueConfig.BatchSendDeadline = model.Duration(100 * time.Millisecond)
queueConfig.MaxShards = 1
@@ -102,11 +225,36 @@ func TestSampleDelivery(t *testing.T) {
writeConfig,
},
}
+ for _, tc := range []struct {
+ protoMsg config.RemoteWriteProtoMsg
+
+ name string
+ samples bool
+ exemplars bool
+ histograms bool
+ floatHistograms bool
+ }{
+ {protoMsg: config.RemoteWriteProtoMsgV1, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
+ {protoMsg: config.RemoteWriteProtoMsgV1, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
+ {protoMsg: config.RemoteWriteProtoMsgV1, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
+ {protoMsg: config.RemoteWriteProtoMsgV1, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
+ {protoMsg: config.RemoteWriteProtoMsgV1, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
+
+		// TODO(alexg): Update some portion of this test to check for the 2.0 metadata.
+ {protoMsg: config.RemoteWriteProtoMsgV2, samples: true, exemplars: false, histograms: false, floatHistograms: false, name: "samples only"},
+ {protoMsg: config.RemoteWriteProtoMsgV2, samples: true, exemplars: true, histograms: true, floatHistograms: true, name: "samples, exemplars, and histograms"},
+ {protoMsg: config.RemoteWriteProtoMsgV2, samples: false, exemplars: true, histograms: false, floatHistograms: false, name: "exemplars only"},
+ {protoMsg: config.RemoteWriteProtoMsgV2, samples: false, exemplars: false, histograms: true, floatHistograms: false, name: "histograms only"},
+ {protoMsg: config.RemoteWriteProtoMsgV2, samples: false, exemplars: false, histograms: false, floatHistograms: true, name: "float histograms only"},
+ } {
+ t.Run(fmt.Sprintf("%s-%s", tc.protoMsg, tc.name), func(t *testing.T) {
+ dir := t.TempDir()
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true)
+ defer s.Close()
- for _, tc := range testcases {
- t.Run(tc.name, func(t *testing.T) {
var (
series []record.RefSeries
+ metadata []record.RefMetadata
samples []record.RefSample
exemplars []record.RefExemplar
histograms []record.RefHistogramSample
@@ -126,19 +274,23 @@ func TestSampleDelivery(t *testing.T) {
if tc.floatHistograms {
_, floatHistograms, series = createHistograms(n, n, true)
}
+ metadata = createSeriesMetadata(series)
// Apply new config.
queueConfig.Capacity = len(samples)
queueConfig.MaxSamplesPerSend = len(samples) / 2
+ // For now we only ever have a single rw config in this test.
+ conf.RemoteWriteConfigs[0].ProtobufMessage = tc.protoMsg
require.NoError(t, s.ApplyConfig(conf))
hash, err := toHash(writeConfig)
require.NoError(t, err)
qm := s.rws.queues[hash]
- c := NewTestWriteClient()
+ c := NewTestWriteClient(tc.protoMsg)
qm.SetClient(c)
qm.StoreSeries(series, 0)
+ qm.StoreMetadata(metadata)
// Send first half of data.
c.expectSamples(samples[:len(samples)/2], series)
@@ -149,7 +301,7 @@ func TestSampleDelivery(t *testing.T) {
qm.AppendExemplars(exemplars[:len(exemplars)/2])
qm.AppendHistograms(histograms[:len(histograms)/2])
qm.AppendFloatHistograms(floatHistograms[:len(floatHistograms)/2])
- c.waitForExpectedData(t)
+ c.waitForExpectedData(t, 30*time.Second)
// Send second half of data.
c.expectSamples(samples[len(samples)/2:], series)
@@ -160,28 +312,35 @@ func TestSampleDelivery(t *testing.T) {
qm.AppendExemplars(exemplars[len(exemplars)/2:])
qm.AppendHistograms(histograms[len(histograms)/2:])
qm.AppendFloatHistograms(floatHistograms[len(floatHistograms)/2:])
- c.waitForExpectedData(t)
+ c.waitForExpectedData(t, 30*time.Second)
})
}
}
-func newTestClientAndQueueManager(t testing.TB, flushDeadline time.Duration) (*TestWriteClient, *QueueManager) {
- c := NewTestWriteClient()
+func newTestClientAndQueueManager(t testing.TB, flushDeadline time.Duration, protoMsg config.RemoteWriteProtoMsg) (*TestWriteClient, *QueueManager) {
+ c := NewTestWriteClient(protoMsg)
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
- return c, newTestQueueManager(t, cfg, mcfg, flushDeadline, c)
+ return c, newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg)
}
-func newTestQueueManager(t testing.TB, cfg config.QueueConfig, mcfg config.MetadataConfig, deadline time.Duration, c WriteClient) *QueueManager {
+func newTestQueueManager(t testing.TB, cfg config.QueueConfig, mcfg config.MetadataConfig, deadline time.Duration, c WriteClient, protoMsg config.RemoteWriteProtoMsg) *QueueManager {
dir := t.TempDir()
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, deadline, newPool(), newHighestTimestampMetric(), nil, false, false, protoMsg)
return m
}
+func testDefaultQueueConfig() config.QueueConfig {
+ cfg := config.DefaultQueueConfig
+	// For faster unit tests, we don't wait the default 5 seconds.
+ cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
+ return cfg
+}
+
func TestMetadataDelivery(t *testing.T) {
- c, m := newTestClientAndQueueManager(t, defaultFlushDeadline)
+ c, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
m.Start()
defer m.Stop()
@@ -196,8 +355,9 @@ func TestMetadataDelivery(t *testing.T) {
})
}
- m.AppendMetadata(context.Background(), metadata)
+ m.AppendWatcherMetadata(context.Background(), metadata)
+ require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal))
require.Len(t, c.receivedMetadata, numMetadata)
// One more write than the rounded quotient should be performed in order to get samples that didn't
// fit into MaxSamplesPerSend.
@@ -206,58 +366,106 @@ func TestMetadataDelivery(t *testing.T) {
require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].Metric][0].MetricFamilyName, metadata[len(metadata)-1].Metric)
}
-func TestSampleDeliveryTimeout(t *testing.T) {
- // Let's send one less sample than batch size, and wait the timeout duration
- n := 9
- samples, series := createTimeseries(n, n)
+func TestWALMetadataDelivery(t *testing.T) {
+ dir := t.TempDir()
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, true)
+ defer s.Close()
cfg := config.DefaultQueueConfig
- cfg.MaxShards = 1
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
+ cfg.MaxShards = 1
- c := NewTestWriteClient()
- m := newTestQueueManager(t, cfg, config.DefaultMetadataConfig, defaultFlushDeadline, c)
- m.StoreSeries(series, 0)
- m.Start()
- defer m.Stop()
+ writeConfig := baseRemoteWriteConfig("http://test-storage.com")
+ writeConfig.QueueConfig = cfg
+ writeConfig.ProtobufMessage = config.RemoteWriteProtoMsgV2
- // Send the samples twice, waiting for the samples in the meantime.
- c.expectSamples(samples, series)
- m.Append(samples)
- c.waitForExpectedData(t)
+ conf := &config.Config{
+ GlobalConfig: config.DefaultGlobalConfig,
+ RemoteWriteConfigs: []*config.RemoteWriteConfig{
+ writeConfig,
+ },
+ }
- c.expectSamples(samples, series)
- m.Append(samples)
- c.waitForExpectedData(t)
+ num := 3
+ _, series := createTimeseries(0, num)
+ metadata := createSeriesMetadata(series)
+
+ require.NoError(t, s.ApplyConfig(conf))
+ hash, err := toHash(writeConfig)
+ require.NoError(t, err)
+ qm := s.rws.queues[hash]
+
+ c := NewTestWriteClient(config.RemoteWriteProtoMsgV1)
+ qm.SetClient(c)
+
+ qm.StoreSeries(series, 0)
+ qm.StoreMetadata(metadata)
+
+ require.Len(t, qm.seriesLabels, num)
+ require.Len(t, qm.seriesMetadata, num)
+
+ c.waitForExpectedData(t, 30*time.Second)
}
-func TestSampleDeliveryOrder(t *testing.T) {
- ts := 10
- n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
- samples := make([]record.RefSample, 0, n)
- series := make([]record.RefSeries, 0, n)
- for i := 0; i < n; i++ {
- name := fmt.Sprintf("test_metric_%d", i%ts)
- samples = append(samples, record.RefSample{
- Ref: chunks.HeadSeriesRef(i),
- T: int64(i),
- V: float64(i),
- })
- series = append(series, record.RefSeries{
- Ref: chunks.HeadSeriesRef(i),
- Labels: labels.FromStrings("__name__", name),
+func TestSampleDeliveryTimeout(t *testing.T) {
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+			// Let's send one less sample than the batch size, and wait the timeout duration.
+ n := 9
+ samples, series := createTimeseries(n, n)
+ cfg := testDefaultQueueConfig()
+ mcfg := config.DefaultMetadataConfig
+ cfg.MaxShards = 1
+
+ c := NewTestWriteClient(protoMsg)
+ m := newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, protoMsg)
+ m.StoreSeries(series, 0)
+ m.Start()
+ defer m.Stop()
+
+ // Send the samples twice, waiting for the samples in the meantime.
+ c.expectSamples(samples, series)
+ m.Append(samples)
+ c.waitForExpectedData(t, 30*time.Second)
+
+ c.expectSamples(samples, series)
+ m.Append(samples)
+ c.waitForExpectedData(t, 30*time.Second)
})
}
+}
- c, m := newTestClientAndQueueManager(t, defaultFlushDeadline)
- c.expectSamples(samples, series)
- m.StoreSeries(series, 0)
+func TestSampleDeliveryOrder(t *testing.T) {
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+ ts := 10
+ n := config.DefaultQueueConfig.MaxSamplesPerSend * ts
+ samples := make([]record.RefSample, 0, n)
+ series := make([]record.RefSeries, 0, n)
+ for i := 0; i < n; i++ {
+ name := fmt.Sprintf("test_metric_%d", i%ts)
+ samples = append(samples, record.RefSample{
+ Ref: chunks.HeadSeriesRef(i),
+ T: int64(i),
+ V: float64(i),
+ })
+ series = append(series, record.RefSeries{
+ Ref: chunks.HeadSeriesRef(i),
+ Labels: labels.FromStrings("__name__", name),
+ })
+ }
- m.Start()
- defer m.Stop()
- // These should be received by the client.
- m.Append(samples)
- c.waitForExpectedData(t)
+ c, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg)
+ c.expectSamples(samples, series)
+ m.StoreSeries(series, 0)
+
+ m.Start()
+ defer m.Stop()
+ // These should be received by the client.
+ m.Append(samples)
+ c.waitForExpectedData(t, 30*time.Second)
+ })
+ }
}
func TestShutdown(t *testing.T) {
@@ -267,7 +475,7 @@ func TestShutdown(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
- m := newTestQueueManager(t, cfg, mcfg, deadline, c)
+ m := newTestQueueManager(t, cfg, mcfg, deadline, c, config.RemoteWriteProtoMsgV1)
n := 2 * config.DefaultQueueConfig.MaxSamplesPerSend
samples, series := createTimeseries(n, n)
m.StoreSeries(series, 0)
@@ -302,8 +510,7 @@ func TestSeriesReset(t *testing.T) {
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
- m := newTestQueueManager(t, cfg, mcfg, deadline, c)
-
+ m := newTestQueueManager(t, cfg, mcfg, deadline, c, config.RemoteWriteProtoMsgV1)
for i := 0; i < numSegments; i++ {
series := []record.RefSeries{}
for j := 0; j < numSeries; j++ {
@@ -317,167 +524,186 @@ func TestSeriesReset(t *testing.T) {
}
func TestReshard(t *testing.T) {
- size := 10 // Make bigger to find more races.
- nSeries := 6
- nSamples := config.DefaultQueueConfig.Capacity * size
- samples, series := createTimeseries(nSamples, nSeries)
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+ size := 10 // Make bigger to find more races.
+ nSeries := 6
+ nSamples := config.DefaultQueueConfig.Capacity * size
+ samples, series := createTimeseries(nSamples, nSeries)
- cfg := config.DefaultQueueConfig
- cfg.MaxShards = 1
+ cfg := config.DefaultQueueConfig
+ cfg.MaxShards = 1
- c := NewTestWriteClient()
- m := newTestQueueManager(t, cfg, config.DefaultMetadataConfig, defaultFlushDeadline, c)
- c.expectSamples(samples, series)
- m.StoreSeries(series, 0)
+ c := NewTestWriteClient(protoMsg)
+ m := newTestQueueManager(t, cfg, config.DefaultMetadataConfig, defaultFlushDeadline, c, protoMsg)
+ c.expectSamples(samples, series)
+ m.StoreSeries(series, 0)
- m.Start()
- defer m.Stop()
+ m.Start()
+ defer m.Stop()
- go func() {
- for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
- sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
- require.True(t, sent, "samples not sent")
- time.Sleep(100 * time.Millisecond)
- }
- }()
+ go func() {
+ for i := 0; i < len(samples); i += config.DefaultQueueConfig.Capacity {
+ sent := m.Append(samples[i : i+config.DefaultQueueConfig.Capacity])
+ require.True(t, sent, "samples not sent")
+ time.Sleep(100 * time.Millisecond)
+ }
+ }()
- for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ {
- m.shards.stop()
- m.shards.start(i)
- time.Sleep(100 * time.Millisecond)
- }
+ for i := 1; i < len(samples)/config.DefaultQueueConfig.Capacity; i++ {
+ m.shards.stop()
+ m.shards.start(i)
+ time.Sleep(100 * time.Millisecond)
+ }
- c.waitForExpectedData(t)
+ c.waitForExpectedData(t, 30*time.Second)
+ })
+ }
}
func TestReshardRaceWithStop(t *testing.T) {
- c := NewTestWriteClient()
- var m *QueueManager
- h := sync.Mutex{}
-
- h.Lock()
-
- cfg := config.DefaultQueueConfig
- mcfg := config.DefaultMetadataConfig
- exitCh := make(chan struct{})
- go func() {
- for {
- m = newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c)
- m.Start()
- h.Unlock()
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+ c := NewTestWriteClient(protoMsg)
+ var m *QueueManager
+ h := sync.Mutex{}
h.Lock()
- m.Stop()
- select {
- case exitCh <- struct{}{}:
- return
- default:
- }
- }
- }()
+ cfg := testDefaultQueueConfig()
+ mcfg := config.DefaultMetadataConfig
+ exitCh := make(chan struct{})
+ go func() {
+ for {
+ m = newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, protoMsg)
+
+ m.Start()
+ h.Unlock()
+ h.Lock()
+ m.Stop()
+
+ select {
+ case exitCh <- struct{}{}:
+ return
+ default:
+ }
+ }
+ }()
- for i := 1; i < 100; i++ {
- h.Lock()
- m.reshardChan <- i
- h.Unlock()
+ for i := 1; i < 100; i++ {
+ h.Lock()
+ m.reshardChan <- i
+ h.Unlock()
+ }
+ <-exitCh
+ })
}
- <-exitCh
}
func TestReshardPartialBatch(t *testing.T) {
- samples, series := createTimeseries(1, 10)
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+ samples, series := createTimeseries(1, 10)
- c := NewTestBlockedWriteClient()
+ c := NewTestBlockedWriteClient()
- cfg := config.DefaultQueueConfig
- mcfg := config.DefaultMetadataConfig
- cfg.MaxShards = 1
- batchSendDeadline := time.Millisecond
- flushDeadline := 10 * time.Millisecond
- cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
+ cfg := testDefaultQueueConfig()
+ mcfg := config.DefaultMetadataConfig
+ cfg.MaxShards = 1
+ batchSendDeadline := time.Millisecond
+ flushDeadline := 10 * time.Millisecond
+ cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
- m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c)
- m.StoreSeries(series, 0)
+ m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg)
+ m.StoreSeries(series, 0)
- m.Start()
+ m.Start()
- for i := 0; i < 100; i++ {
- done := make(chan struct{})
- go func() {
- m.Append(samples)
- time.Sleep(batchSendDeadline)
- m.shards.stop()
- m.shards.start(1)
- done <- struct{}{}
- }()
- select {
- case <-done:
- case <-time.After(2 * time.Second):
- t.Error("Deadlock between sending and stopping detected")
- pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
- t.FailNow()
- }
+ for i := 0; i < 100; i++ {
+ done := make(chan struct{})
+ go func() {
+ m.Append(samples)
+ time.Sleep(batchSendDeadline)
+ m.shards.stop()
+ m.shards.start(1)
+ done <- struct{}{}
+ }()
+ select {
+ case <-done:
+ case <-time.After(2 * time.Second):
+ t.Error("Deadlock between sending and stopping detected")
+ pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+ t.FailNow()
+ }
+ }
+ // We can only call stop if there was not a deadlock.
+ m.Stop()
+ })
}
- // We can only call stop if there was not a deadlock.
- m.Stop()
}
// TestQueueFilledDeadlock makes sure the code does not deadlock in the case
// where a large scrape (> capacity + max samples per send) is appended at the
// same time as a batch times out according to the batch send deadline.
func TestQueueFilledDeadlock(t *testing.T) {
- samples, series := createTimeseries(50, 1)
-
- c := NewNopWriteClient()
-
- cfg := config.DefaultQueueConfig
- mcfg := config.DefaultMetadataConfig
- cfg.MaxShards = 1
- cfg.MaxSamplesPerSend = 10
- cfg.Capacity = 20
- flushDeadline := time.Second
- batchSendDeadline := time.Millisecond
- cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
-
- m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c)
- m.StoreSeries(series, 0)
- m.Start()
- defer m.Stop()
-
- for i := 0; i < 100; i++ {
- done := make(chan struct{})
- go func() {
- time.Sleep(batchSendDeadline)
- m.Append(samples)
- done <- struct{}{}
- }()
- select {
- case <-done:
- case <-time.After(2 * time.Second):
- t.Error("Deadlock between sending and appending detected")
- pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
- t.FailNow()
- }
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+ samples, series := createTimeseries(50, 1)
+
+ c := NewNopWriteClient()
+
+ cfg := testDefaultQueueConfig()
+ mcfg := config.DefaultMetadataConfig
+ cfg.MaxShards = 1
+ cfg.MaxSamplesPerSend = 10
+ cfg.Capacity = 20
+ flushDeadline := time.Second
+ batchSendDeadline := time.Millisecond
+ cfg.BatchSendDeadline = model.Duration(batchSendDeadline)
+
+ m := newTestQueueManager(t, cfg, mcfg, flushDeadline, c, protoMsg)
+ m.StoreSeries(series, 0)
+ m.Start()
+ defer m.Stop()
+
+ for i := 0; i < 100; i++ {
+ done := make(chan struct{})
+ go func() {
+ time.Sleep(batchSendDeadline)
+ m.Append(samples)
+ done <- struct{}{}
+ }()
+ select {
+ case <-done:
+ case <-time.After(2 * time.Second):
+ t.Error("Deadlock between sending and appending detected")
+ pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+ t.FailNow()
+ }
+ }
+ })
}
}
func TestReleaseNoninternedString(t *testing.T) {
- _, m := newTestClientAndQueueManager(t, defaultFlushDeadline)
- m.Start()
- defer m.Stop()
+ for _, protoMsg := range []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2} {
+ t.Run(fmt.Sprint(protoMsg), func(t *testing.T) {
+ _, m := newTestClientAndQueueManager(t, defaultFlushDeadline, protoMsg)
+ m.Start()
+ defer m.Stop()
+ for i := 1; i < 1000; i++ {
+ m.StoreSeries([]record.RefSeries{
+ {
+ Ref: chunks.HeadSeriesRef(i),
+ Labels: labels.FromStrings("asdf", strconv.Itoa(i)),
+ },
+ }, 0)
+ m.SeriesReset(1)
+ }
- for i := 1; i < 1000; i++ {
- m.StoreSeries([]record.RefSeries{
- {
- Ref: chunks.HeadSeriesRef(i),
- Labels: labels.FromStrings("asdf", strconv.Itoa(i)),
- },
- }, 0)
- m.SeriesReset(1)
+ metric := client_testutil.ToFloat64(noReferenceReleases)
+ require.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
+ })
}
-
- metric := client_testutil.ToFloat64(noReferenceReleases)
- require.Equal(t, 0.0, metric, "expected there to be no calls to release for strings that were not already interned: %d", int(metric))
}
func TestShouldReshard(t *testing.T) {
@@ -505,7 +731,7 @@ func TestShouldReshard(t *testing.T) {
}
for _, c := range cases {
- _, m := newTestClientAndQueueManager(t, defaultFlushDeadline)
+ _, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
m.numShards = c.startingShards
m.dataIn.incr(c.samplesIn)
m.dataOut.incr(c.samplesOut)
@@ -551,7 +777,7 @@ func TestDisableReshardOnRetry(t *testing.T) {
}
)
- m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, 0, newPool(), newHighestTimestampMetric(), nil, false, false)
+ m := NewQueueManager(metrics, nil, nil, nil, "", newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, client, 0, newPool(), newHighestTimestampMetric(), nil, false, false, config.RemoteWriteProtoMsgV1)
m.StoreSeries(fakeSeries, 0)
// Attempt to append samples while the manager is running. We immediately stop the
@@ -601,6 +827,9 @@ func createTimeseries(numSamples, numSeries int, extraLabels ...labels.Label) ([
// Create Labels that is name of series plus any extra labels supplied.
lb.Reset()
lb.Add(labels.MetricName, name)
+ rand.Shuffle(len(extraLabels), func(i, j int) {
+ extraLabels[i], extraLabels[j] = extraLabels[j], extraLabels[i]
+ })
for _, l := range extraLabels {
lb.Add(l.Name, l.Value)
}
@@ -705,10 +934,26 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record.
return histograms, nil, series
}
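+// createSeriesMetadata returns one synthetic counter metadata record per
+// input series.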
+func createSeriesMetadata(series []record.RefSeries) []record.RefMetadata {
+	metas := make([]record.RefMetadata, 0, len(series))
+
+ for _, s := range series {
+ metas = append(metas, record.RefMetadata{
+ Ref: s.Ref,
+ Type: uint8(record.Counter),
+ Unit: "unit text",
+ Help: "help text",
+ })
+ }
+ return metas
+}
+
func getSeriesIDFromRef(r record.RefSeries) string {
return r.Labels.String()
}
+// TestWriteClient represents a write client which does not call remote storage,
+// but instead implements a fake WriteHandler for test purposes.
type TestWriteClient struct {
receivedSamples map[string][]prompb.Sample
expectedSamples map[string][]prompb.Sample
@@ -720,30 +965,37 @@ type TestWriteClient struct {
expectedFloatHistograms map[string][]prompb.Histogram
receivedMetadata map[string][]prompb.MetricMetadata
writesReceived int
- withWaitGroup bool
- wg sync.WaitGroup
mtx sync.Mutex
buf []byte
+ protoMsg config.RemoteWriteProtoMsg
+ injectedErrs []error
+ currErr int
+ retry bool
- storeWait time.Duration
+ storeWait time.Duration
+ // TODO(npazosmendez): maybe replaceable with injectedErrs?
returnError error
}
-func NewTestWriteClient() *TestWriteClient {
+// NewTestWriteClient creates a new testing write client.
+func NewTestWriteClient(protoMsg config.RemoteWriteProtoMsg) *TestWriteClient {
return &TestWriteClient{
- withWaitGroup: true,
receivedSamples: map[string][]prompb.Sample{},
expectedSamples: map[string][]prompb.Sample{},
receivedMetadata: map[string][]prompb.MetricMetadata{},
+ protoMsg: protoMsg,
storeWait: 0,
returnError: nil,
}
}
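+// injectErrors primes the client with one error (or nil) per expected Store
+// call; they are consumed in order, after decompression but before the
+// payload is unmarshaled.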
+func (c *TestWriteClient) injectErrors(injectedErrs []error) {
+ c.injectedErrs = injectedErrs
+ c.currErr = -1
+ c.retry = false
+}
+
func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.RefSeries) {
- if !c.withWaitGroup {
- return
- }
c.mtx.Lock()
defer c.mtx.Unlock()
@@ -757,16 +1009,9 @@ func (c *TestWriteClient) expectSamples(ss []record.RefSample, series []record.R
Value: s.V,
})
}
- if !c.withWaitGroup {
- return
- }
- c.wg.Add(len(ss))
}
func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []record.RefSeries) {
- if !c.withWaitGroup {
- return
- }
c.mtx.Lock()
defer c.mtx.Unlock()
@@ -776,19 +1021,15 @@ func (c *TestWriteClient) expectExemplars(ss []record.RefExemplar, series []reco
for _, s := range ss {
tsID := getSeriesIDFromRef(series[s.Ref])
e := prompb.Exemplar{
- Labels: LabelsToLabelsProto(s.Labels, nil),
+ Labels: prompb.FromLabels(s.Labels, nil),
Timestamp: s.T,
Value: s.V,
}
c.expectedExemplars[tsID] = append(c.expectedExemplars[tsID], e)
}
- c.wg.Add(len(ss))
}
func (c *TestWriteClient) expectHistograms(hh []record.RefHistogramSample, series []record.RefSeries) {
- if !c.withWaitGroup {
- return
- }
c.mtx.Lock()
defer c.mtx.Unlock()
@@ -797,15 +1038,11 @@ func (c *TestWriteClient) expectHistograms(hh []record.RefHistogramSample, serie
for _, h := range hh {
tsID := getSeriesIDFromRef(series[h.Ref])
- c.expectedHistograms[tsID] = append(c.expectedHistograms[tsID], HistogramToHistogramProto(h.T, h.H))
+ c.expectedHistograms[tsID] = append(c.expectedHistograms[tsID], prompb.FromIntHistogram(h.T, h.H))
}
- c.wg.Add(len(hh))
}
func (c *TestWriteClient) expectFloatHistograms(fhs []record.RefFloatHistogramSample, series []record.RefSeries) {
- if !c.withWaitGroup {
- return
- }
c.mtx.Lock()
defer c.mtx.Unlock()
@@ -814,18 +1051,42 @@ func (c *TestWriteClient) expectFloatHistograms(fhs []record.RefFloatHistogramSa
for _, fh := range fhs {
tsID := getSeriesIDFromRef(series[fh.Ref])
- c.expectedFloatHistograms[tsID] = append(c.expectedFloatHistograms[tsID], FloatHistogramToHistogramProto(fh.T, fh.FH))
+ c.expectedFloatHistograms[tsID] = append(c.expectedFloatHistograms[tsID], prompb.FromFloatHistogram(fh.T, fh.FH))
}
- c.wg.Add(len(fhs))
}
-func (c *TestWriteClient) waitForExpectedData(tb testing.TB) {
- if !c.withWaitGroup {
- return
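+// deepLen returns the total number of elements across all slices in the
+// given maps.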
+func deepLen[M any](ms ...map[string][]M) int {
+ l := 0
+ for _, m := range ms {
+ for _, v := range m {
+ l += len(v)
+ }
+ }
+ return l
+}
+
+func (c *TestWriteClient) waitForExpectedData(tb testing.TB, timeout time.Duration) {
+ tb.Helper()
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ if err := runutil.Retry(500*time.Millisecond, ctx.Done(), func() error {
+ c.mtx.Lock()
+ exp := deepLen(c.expectedSamples) + deepLen(c.expectedExemplars) + deepLen(c.expectedHistograms, c.expectedFloatHistograms)
+ got := deepLen(c.receivedSamples) + deepLen(c.receivedExemplars) + deepLen(c.receivedHistograms, c.receivedFloatHistograms)
+ c.mtx.Unlock()
+
+ if got < exp {
+ return fmt.Errorf("expected %v samples/exemplars/histograms/floathistograms, got %v", exp, got)
+ }
+ return nil
+ }); err != nil {
+ tb.Error(err)
}
- c.wg.Wait()
+
c.mtx.Lock()
defer c.mtx.Unlock()
+
for ts, expectedSamples := range c.expectedSamples {
require.Equal(tb, expectedSamples, c.receivedSamples[ts], ts)
}
@@ -865,50 +1126,68 @@ func (c *TestWriteClient) Store(_ context.Context, req []byte, _ int) error {
if c.buf != nil {
c.buf = c.buf[:cap(c.buf)]
}
+
reqBuf, err := snappy.Decode(c.buf, req)
c.buf = reqBuf
if err != nil {
return err
}
- var reqProto prompb.WriteRequest
- if err := proto.Unmarshal(reqBuf, &reqProto); err != nil {
+	// Check whether we have been told to inject an error for this call.
+ if len(c.injectedErrs) > 0 {
+ c.currErr++
+ if err = c.injectedErrs[c.currErr]; err != nil {
+ return err
+ }
+ }
+
+ var reqProto *prompb.WriteRequest
+ switch c.protoMsg {
+ case config.RemoteWriteProtoMsgV1:
+ reqProto = &prompb.WriteRequest{}
+ err = proto.Unmarshal(reqBuf, reqProto)
+ case config.RemoteWriteProtoMsgV2:
+		// NOTE(bwplotka): A v1 message can sometimes be unmarshaled as v2
+		// without errors, so this cannot reliably detect a version mismatch.
+ var reqProtoV2 writev2.Request
+ err = proto.Unmarshal(reqBuf, &reqProtoV2)
+ if err == nil {
+ reqProto, err = v2RequestToWriteRequest(&reqProtoV2)
+ }
+ }
+ if err != nil {
return err
}
- builder := labels.NewScratchBuilder(0)
- count := 0
+
+ if len(reqProto.Timeseries) == 0 && len(reqProto.Metadata) == 0 {
+		return errors.New("invalid request, no timeseries or metadata")
+ }
+
+ b := labels.NewScratchBuilder(0)
for _, ts := range reqProto.Timeseries {
- labels := LabelProtosToLabels(&builder, ts.Labels)
+ labels := ts.ToLabels(&b, nil)
tsID := labels.String()
- for _, sample := range ts.Samples {
- count++
- c.receivedSamples[tsID] = append(c.receivedSamples[tsID], sample)
+ if len(ts.Samples) > 0 {
+ c.receivedSamples[tsID] = append(c.receivedSamples[tsID], ts.Samples...)
}
- for _, ex := range ts.Exemplars {
- count++
- c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ex)
+ if len(ts.Exemplars) > 0 {
+ c.receivedExemplars[tsID] = append(c.receivedExemplars[tsID], ts.Exemplars...)
}
- for _, histogram := range ts.Histograms {
- count++
- if histogram.IsFloatHistogram() {
- c.receivedFloatHistograms[tsID] = append(c.receivedFloatHistograms[tsID], histogram)
+ for _, h := range ts.Histograms {
+ if h.IsFloatHistogram() {
+ c.receivedFloatHistograms[tsID] = append(c.receivedFloatHistograms[tsID], h)
} else {
- c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], histogram)
+ c.receivedHistograms[tsID] = append(c.receivedHistograms[tsID], h)
}
}
}
- if c.withWaitGroup {
- c.wg.Add(-count)
- }
-
for _, m := range reqProto.Metadata {
c.receivedMetadata[m.MetricFamilyName] = append(c.receivedMetadata[m.MetricFamilyName], m)
}
c.writesReceived++
-
return nil
}
@@ -920,6 +1199,51 @@ func (c *TestWriteClient) Endpoint() string {
return "http://test-remote.com/1234"
}
+func v2RequestToWriteRequest(v2Req *writev2.Request) (*prompb.WriteRequest, error) {
+ req := &prompb.WriteRequest{
+ Timeseries: make([]prompb.TimeSeries, len(v2Req.Timeseries)),
+ // TODO handle metadata?
+ }
+ b := labels.NewScratchBuilder(0)
+ for i, rts := range v2Req.Timeseries {
+ rts.ToLabels(&b, v2Req.Symbols).Range(func(l labels.Label) {
+ req.Timeseries[i].Labels = append(req.Timeseries[i].Labels, prompb.Label{
+ Name: l.Name,
+ Value: l.Value,
+ })
+ })
+
+ exemplars := make([]prompb.Exemplar, len(rts.Exemplars))
+ for j, e := range rts.Exemplars {
+ exemplars[j].Value = e.Value
+ exemplars[j].Timestamp = e.Timestamp
+ e.ToExemplar(&b, v2Req.Symbols).Labels.Range(func(l labels.Label) {
+ exemplars[j].Labels = append(exemplars[j].Labels, prompb.Label{
+ Name: l.Name,
+ Value: l.Value,
+ })
+ })
+ }
+ req.Timeseries[i].Exemplars = exemplars
+
+ req.Timeseries[i].Samples = make([]prompb.Sample, len(rts.Samples))
+ for j, s := range rts.Samples {
+ req.Timeseries[i].Samples[j].Timestamp = s.Timestamp
+ req.Timeseries[i].Samples[j].Value = s.Value
+ }
+
+ req.Timeseries[i].Histograms = make([]prompb.Histogram, len(rts.Histograms))
+ for j, h := range rts.Histograms {
+ if h.IsFloatHistogram() {
+ req.Timeseries[i].Histograms[j] = prompb.FromFloatHistogram(h.Timestamp, h.ToFloatHistogram())
+ continue
+ }
+ req.Timeseries[i].Histograms[j] = prompb.FromIntHistogram(h.Timestamp, h.ToIntHistogram())
+ }
+ }
+ return req, nil
+}
+
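v2RequestToWriteRequest resolves the v2 symbol-table indirection back into plain v1 label pairs: LabelsRefs holds name/value index pairs into the request-wide Symbols slice, whose first entry is the empty string. A small illustration (values hypothetical):

	symbols := []string{"", "__name__", "up", "job", "prometheus"}
	refs := []uint32{1, 2, 3, 4} // __name__="up", job="prometheus"
	for i := 0; i < len(refs); i += 2 {
		fmt.Printf("%s=%q\n", symbols[refs[i]], symbols[refs[i+1]])
	}
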
// TestBlockingWriteClient is a queue_manager WriteClient which will block
// on any calls to Store(), until the request's Context is cancelled, at which
// point the `numCalls` property will contain a count of how many times Store()
@@ -953,10 +1277,12 @@ func (c *TestBlockingWriteClient) Endpoint() string {
// For benchmarking the send and not the receive side.
type NopWriteClient struct{}
-func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
-func (c *NopWriteClient) Store(context.Context, []byte, int) error { return nil }
-func (c *NopWriteClient) Name() string { return "nopwriteclient" }
-func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }
+func NewNopWriteClient() *NopWriteClient { return &NopWriteClient{} }
+func (c *NopWriteClient) Store(context.Context, []byte, int) error {
+ return nil
+}
+func (c *NopWriteClient) Name() string { return "nopwriteclient" }
+func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }
type MockWriteClient struct {
StoreFunc func(context.Context, []byte, int) error
@@ -998,13 +1324,14 @@ func BenchmarkSampleSend(b *testing.B) {
c := NewNopWriteClient()
- cfg := config.DefaultQueueConfig
+ cfg := testDefaultQueueConfig()
mcfg := config.DefaultMetadataConfig
cfg.BatchSendDeadline = model.Duration(100 * time.Millisecond)
cfg.MinShards = 20
cfg.MaxShards = 20
- m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c)
+	// TODO: test with new proto type(s)
+ m := newTestQueueManager(b, cfg, mcfg, defaultFlushDeadline, c, config.RemoteWriteProtoMsgV1)
m.StoreSeries(series, 0)
// These should be received by the client.
@@ -1058,12 +1385,12 @@ func BenchmarkStoreSeries(b *testing.B) {
for _, tc := range testCases {
b.Run(tc.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
- c := NewTestWriteClient()
+ c := NewTestWriteClient(config.RemoteWriteProtoMsgV1)
dir := b.TempDir()
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
metrics := newQueueManagerMetrics(nil, "", "")
- m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
+ m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false, config.RemoteWriteProtoMsgV1)
m.externalLabels = tc.externalLabels
m.relabelConfigs = tc.relabelConfigs
@@ -1095,14 +1422,15 @@ func BenchmarkStartup(b *testing.B) {
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
logger = log.With(logger, "caller", log.DefaultCaller)
- cfg := config.DefaultQueueConfig
+ cfg := testDefaultQueueConfig()
mcfg := config.DefaultMetadataConfig
for n := 0; n < b.N; n++ {
metrics := newQueueManagerMetrics(nil, "", "")
c := NewTestBlockedWriteClient()
+		// TODO: test with new proto type(s)
m := NewQueueManager(metrics, nil, nil, logger, dir,
newEWMARate(ewmaWeight, shardUpdateDuration),
- cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false)
+ cfg, mcfg, labels.EmptyLabels(), nil, c, 1*time.Minute, newPool(), newHighestTimestampMetric(), nil, false, false, config.RemoteWriteProtoMsgV1)
m.watcher.SetStartTime(timestamp.Time(math.MaxInt64))
m.watcher.MaxSegment = segments[len(segments)-2]
err := m.watcher.Run()
@@ -1181,7 +1509,7 @@ func TestProcessExternalLabels(t *testing.T) {
func TestCalculateDesiredShards(t *testing.T) {
cfg := config.DefaultQueueConfig
- _, m := newTestClientAndQueueManager(t, defaultFlushDeadline)
+ _, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
samplesIn := m.dataIn
// Need to start the queue manager so the proper metrics are initialized.
@@ -1251,7 +1579,7 @@ func TestCalculateDesiredShards(t *testing.T) {
}
func TestCalculateDesiredShardsDetail(t *testing.T) {
- _, m := newTestClientAndQueueManager(t, defaultFlushDeadline)
+ _, m := newTestClientAndQueueManager(t, defaultFlushDeadline, config.RemoteWriteProtoMsgV1)
samplesIn := m.dataIn
for _, tc := range []struct {
@@ -1464,27 +1792,179 @@ func TestQueue_FlushAndShutdownDoesNotDeadlock(t *testing.T) {
}
}
+func createDummyTimeSeries(instances int) []timeSeries {
+ metrics := []labels.Labels{
+ labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
+ labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
+ labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5"),
+ labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.75"),
+ labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1"),
+ labels.FromStrings("__name__", "go_gc_duration_seconds_sum"),
+ labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
+ labels.FromStrings("__name__", "go_memstats_alloc_bytes_total"),
+ labels.FromStrings("__name__", "go_memstats_frees_total"),
+ labels.FromStrings("__name__", "go_memstats_lookups_total"),
+ labels.FromStrings("__name__", "go_memstats_mallocs_total"),
+ labels.FromStrings("__name__", "go_goroutines"),
+ labels.FromStrings("__name__", "go_info", "version", "go1.19.3"),
+ labels.FromStrings("__name__", "go_memstats_alloc_bytes"),
+ labels.FromStrings("__name__", "go_memstats_buck_hash_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_gc_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_heap_alloc_bytes"),
+ labels.FromStrings("__name__", "go_memstats_heap_idle_bytes"),
+ labels.FromStrings("__name__", "go_memstats_heap_inuse_bytes"),
+ labels.FromStrings("__name__", "go_memstats_heap_objects"),
+ labels.FromStrings("__name__", "go_memstats_heap_released_bytes"),
+ labels.FromStrings("__name__", "go_memstats_heap_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_last_gc_time_seconds"),
+ labels.FromStrings("__name__", "go_memstats_mcache_inuse_bytes"),
+ labels.FromStrings("__name__", "go_memstats_mcache_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_mspan_inuse_bytes"),
+ labels.FromStrings("__name__", "go_memstats_mspan_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_next_gc_bytes"),
+ labels.FromStrings("__name__", "go_memstats_other_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_stack_inuse_bytes"),
+ labels.FromStrings("__name__", "go_memstats_stack_sys_bytes"),
+ labels.FromStrings("__name__", "go_memstats_sys_bytes"),
+ labels.FromStrings("__name__", "go_threads"),
+ }
+
+ commonLabels := labels.FromStrings(
+ "cluster", "some-cluster-0",
+ "container", "prometheus",
+ "job", "some-namespace/prometheus",
+ "namespace", "some-namespace")
+
+ var result []timeSeries
+ r := rand.New(rand.NewSource(0))
+ for i := 0; i < instances; i++ {
+ b := labels.NewBuilder(commonLabels)
+ b.Set("pod", "prometheus-"+strconv.Itoa(i))
+ for _, lbls := range metrics {
+ lbls.Range(func(l labels.Label) {
+ b.Set(l.Name, l.Value)
+ })
+ result = append(result, timeSeries{
+ seriesLabels: b.Labels(),
+ value: r.Float64(),
+ })
+ }
+ }
+ return result
+}
+
+func BenchmarkBuildWriteRequest(b *testing.B) {
+ noopLogger := log.NewNopLogger()
+ bench := func(b *testing.B, batch []timeSeries) {
+ buff := make([]byte, 0)
+ seriesBuff := make([]prompb.TimeSeries, len(batch))
+ for i := range seriesBuff {
+ seriesBuff[i].Samples = []prompb.Sample{{}}
+ seriesBuff[i].Exemplars = []prompb.Exemplar{{}}
+ }
+ pBuf := proto.NewBuffer(nil)
+
+ // Warmup buffers
+ for i := 0; i < 10; i++ {
+ populateTimeSeries(batch, seriesBuff, true, true)
+ buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy")
+ }
+
+ b.ResetTimer()
+ totalSize := 0
+ for i := 0; i < b.N; i++ {
+ populateTimeSeries(batch, seriesBuff, true, true)
+ req, _, _, err := buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy")
+ if err != nil {
+ b.Fatal(err)
+ }
+			totalSize += len(req)
+		}
+		b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
+ }
+
+ twoBatch := createDummyTimeSeries(2)
+ tenBatch := createDummyTimeSeries(10)
+ hundredBatch := createDummyTimeSeries(100)
+
+ b.Run("2 instances", func(b *testing.B) {
+ bench(b, twoBatch)
+ })
+
+ b.Run("10 instances", func(b *testing.B) {
+ bench(b, tenBatch)
+ })
+
+	b.Run("100 instances", func(b *testing.B) {
+ bench(b, hundredBatch)
+ })
+}
+
+func BenchmarkBuildV2WriteRequest(b *testing.B) {
+ noopLogger := log.NewNopLogger()
+ type testcase struct {
+ batch []timeSeries
+ }
+ testCases := []testcase{
+ {createDummyTimeSeries(2)},
+ {createDummyTimeSeries(10)},
+ {createDummyTimeSeries(100)},
+ }
+ for _, tc := range testCases {
+ symbolTable := writev2.NewSymbolTable()
+ buff := make([]byte, 0)
+ seriesBuff := make([]writev2.TimeSeries, len(tc.batch))
+ for i := range seriesBuff {
+ seriesBuff[i].Samples = []writev2.Sample{{}}
+ seriesBuff[i].Exemplars = []writev2.Exemplar{{}}
+ }
+ pBuf := []byte{}
+
+ // Warmup buffers
+ for i := 0; i < 10; i++ {
+ populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true)
+ buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
+ }
+
+ b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
+			totalSize := 0
+			b.ResetTimer()
+			for j := 0; j < b.N; j++ {
+				populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true)
+				req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
+				if err != nil {
+					b.Fatal(err)
+				}
+				symbolTable.Reset()
+				totalSize += len(req)
+			}
+			b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
+ })
+ }
+}
+
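The v2 benchmark also exercises the symbol table, which is why its payloads stay compact: each distinct label string is interned once per request and referenced by index from every series that uses it. A rough sketch, assuming the writev2 SymbolsTable semantics used above (Symbolize returning a stable ref per string):

	st := writev2.NewSymbolTable()
	jobRef := st.Symbolize("job") // e.g. 1; "job" stored once
	again := st.Symbolize("job")  // same ref returned, nothing re-stored
	syms := st.Symbols()          // e.g. []string{"", "job"}
	_, _, _ = jobRef, again, syms
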
func TestDropOldTimeSeries(t *testing.T) {
size := 10
nSeries := 6
nSamples := config.DefaultQueueConfig.Capacity * size
samples, newSamples, series := createTimeseriesWithOldSamples(nSamples, nSeries)
- c := NewTestWriteClient()
+ // TODO(alexg): test with new version
+ c := NewTestWriteClient(config.RemoteWriteProtoMsgV1)
c.expectSamples(newSamples, series)
cfg := config.DefaultQueueConfig
mcfg := config.DefaultMetadataConfig
cfg.MaxShards = 1
cfg.SampleAgeLimit = model.Duration(60 * time.Second)
- m := newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c)
+ m := newTestQueueManager(t, cfg, mcfg, defaultFlushDeadline, c, config.RemoteWriteProtoMsgV1)
m.StoreSeries(series, 0)
m.Start()
defer m.Stop()
m.Append(samples)
- c.waitForExpectedData(t)
+ c.waitForExpectedData(t, 30*time.Second)
}
func TestIsSampleOld(t *testing.T) {
@@ -1511,9 +1991,8 @@ func TestSendSamplesWithBackoffWithSampleAgeLimit(t *testing.T) {
metadataCfg.Send = true
metadataCfg.SendInterval = model.Duration(time.Second * 60)
metadataCfg.MaxSamplesPerSend = maxSamplesPerSend
- c := NewTestWriteClient()
- c.withWaitGroup = false
- m := newTestQueueManager(t, cfg, metadataCfg, time.Second, c)
+ c := NewTestWriteClient(config.RemoteWriteProtoMsgV1)
+ m := newTestQueueManager(t, cfg, metadataCfg, time.Second, c, config.RemoteWriteProtoMsgV1)
m.Start()
diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index 452b29221..a68187268 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -124,7 +124,7 @@ func TestSampledReadEndpoint(t *testing.T) {
{Name: "d", Value: "e"},
},
Histograms: []prompb.Histogram{
- FloatHistogramToHistogramProto(0, tsdbutil.GenerateTestFloatHistogram(0)),
+ prompb.FromFloatHistogram(0, tsdbutil.GenerateTestFloatHistogram(0)),
},
},
},
diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go
index 810009af0..357bdba1f 100644
--- a/storage/remote/read_test.go
+++ b/storage/remote/read_test.go
@@ -92,7 +92,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
for _, tc := range cases {
t.Run("", func(t *testing.T) {
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteReadConfigs: tc.cfgs,
@@ -172,12 +172,12 @@ func TestSeriesSetFilter(t *testing.T) {
toRemove: []string{"foo"},
in: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- {Labels: LabelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)},
+ {Labels: prompb.FromLabels(labels.FromStrings("foo", "bar", "a", "b"), nil)},
},
},
expected: &prompb.QueryResult{
Timeseries: []*prompb.TimeSeries{
- {Labels: LabelsToLabelsProto(labels.FromStrings("a", "b"), nil)},
+ {Labels: prompb.FromLabels(labels.FromStrings("a", "b"), nil)},
},
},
},
@@ -211,7 +211,7 @@ func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prom
q := &prompb.QueryResult{}
for _, s := range c.store {
- l := LabelProtosToLabels(&c.b, s.Labels)
+ l := s.ToLabels(&c.b, nil)
var notMatch bool
for _, m := range matchers {
diff --git a/storage/remote/storage.go b/storage/remote/storage.go
index 758ba3cc9..afa2d411a 100644
--- a/storage/remote/storage.go
+++ b/storage/remote/storage.go
@@ -62,7 +62,7 @@ type Storage struct {
}
// NewStorage returns a remote.Storage.
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {
+func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage {
if l == nil {
l = log.NewNopLogger()
}
@@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
logger: logger,
localStartTimeCallback: stCallback,
}
- s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)
+ s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL)
return s
}
diff --git a/storage/remote/storage_test.go b/storage/remote/storage_test.go
index a62cd2da3..8c97d870e 100644
--- a/storage/remote/storage_test.go
+++ b/storage/remote/storage_test.go
@@ -29,7 +29,7 @@ import (
func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -56,7 +56,7 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
@@ -77,7 +77,7 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
@@ -102,7 +102,7 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir()
- s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{
@@ -154,7 +154,7 @@ func baseRemoteReadConfig(host string) *config.RemoteReadConfig {
// ApplyConfig runs concurrently with Notify
// See https://github.com/prometheus/prometheus/issues/12747
func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) {
- s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil)
+ s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil, false)
var wg sync.WaitGroup
wg.Add(2000)
diff --git a/storage/remote/write.go b/storage/remote/write.go
index 66455cb4d..cd8cd588c 100644
--- a/storage/remote/write.go
+++ b/storage/remote/write.go
@@ -15,6 +15,7 @@ package remote
import (
"context"
+ "errors"
"fmt"
"math"
"sync"
@@ -65,6 +66,7 @@ type WriteStorage struct {
externalLabels labels.Labels
dir string
queues map[string]*QueueManager
+ metadataInWAL bool
samplesIn *ewmaRate
flushDeadline time.Duration
interner *pool
@@ -76,7 +78,7 @@ type WriteStorage struct {
}
// NewWriteStorage creates and runs a WriteStorage.
-func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
+func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *WriteStorage {
if logger == nil {
logger = log.NewNopLogger()
}
@@ -92,6 +94,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
interner: newPool(),
scraper: sm,
quit: make(chan struct{}),
+		metadataInWAL:     metadataInWAL,
highestTimestamp: &maxTimestamp{
Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
@@ -145,6 +148,9 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
newQueues := make(map[string]*QueueManager)
newHashes := []string{}
for _, rwConf := range conf.RemoteWriteConfigs {
+ if rwConf.ProtobufMessage == config.RemoteWriteProtoMsgV2 && !rws.metadataInWAL {
+			return errors.New("invalid remote write configuration: remote write version 2.0 requires the `--enable-feature=metadata-wal-records` feature flag to be enabled")
+ }
hash, err := toHash(rwConf)
if err != nil {
return err
@@ -165,6 +171,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
c, err := NewWriteClient(name, &ClientConfig{
URL: rwConf.URL,
+ WriteProtoMsg: rwConf.ProtobufMessage,
Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config,
@@ -207,6 +214,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper,
rwConf.SendExemplars,
rwConf.SendNativeHistograms,
+ rwConf.ProtobufMessage,
)
// Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash)
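The new ApplyConfig check couples remote write 2.0 to the metadata-wal-records feature flag before any queue is built. A hedged, test-style sketch of the failure mode (dir is an assumed temp directory):

	rws := NewWriteStorage(nil, nil, dir, time.Minute, nil, false /* metadataInWAL */)
	err := rws.ApplyConfig(&config.Config{
		RemoteWriteConfigs: []*config.RemoteWriteConfig{
			{ProtobufMessage: config.RemoteWriteProtoMsgV2},
		},
	})
	// err is non-nil: v2 requires --enable-feature=metadata-wal-records.
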
diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go
index 0832c65ab..9997811ab 100644
--- a/storage/remote/write_handler.go
+++ b/storage/remote/write_handler.go
@@ -17,19 +17,24 @@ import (
"context"
"errors"
"fmt"
+ "io"
"net/http"
+ "strings"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
-
+ "github.com/gogo/protobuf/proto"
+ "github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
@@ -39,17 +44,23 @@ type writeHandler struct {
appendable storage.Appendable
samplesWithInvalidLabelsTotal prometheus.Counter
+
+ acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
}
const maxAheadTime = 10 * time.Minute
-// NewWriteHandler creates a http.Handler that accepts remote write requests and
-// writes them to the provided appendable.
-func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
+// NewWriteHandler creates an http.Handler that accepts remote write requests for
+// any of the protobuf messages in acceptedProtoMsgs and writes them to the
+// provided appendable.
+func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
+ protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
+ for _, acc := range acceptedProtoMsgs {
+ protoMsgs[acc] = struct{}{}
+ }
h := &writeHandler{
- logger: logger,
- appendable: appendable,
-
+ logger: logger,
+ appendable: appendable,
+ acceptedProtoMsgs: protoMsgs,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
@@ -63,15 +74,107 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
return h
}
+func (h *writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
+ contentType = strings.TrimSpace(contentType)
+
+ parts := strings.Split(contentType, ";")
+ if parts[0] != appProtoContentType {
+ return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
+ }
+	// Parse potential parameters, as defined in https://www.rfc-editor.org/rfc/rfc9110#parameter.
+ for _, p := range parts[1:] {
+ pair := strings.Split(p, "=")
+ if len(pair) != 2 {
+ return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
+ }
+ if pair[0] == "proto" {
+ ret := config.RemoteWriteProtoMsg(pair[1])
+ if err := ret.Validate(); err != nil {
+ return "", fmt.Errorf("got %v content type; %w", contentType, err)
+ }
+ return ret, nil
+ }
+ }
+ // No "proto=" parameter, assuming v1.
+ return config.RemoteWriteProtoMsgV1, nil
+}
+
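For reference, the behavior parseProtoMsg gives for typical inputs, assuming appProtoContentType is "application/x-protobuf" as in the remote write specification (illustrative, not exhaustive):

	h.parseProtoMsg("application/x-protobuf")
	// -> config.RemoteWriteProtoMsgV1 (no proto= parameter, v1 assumed)
	h.parseProtoMsg("application/x-protobuf;proto=io.prometheus.write.v2.Request")
	// -> config.RemoteWriteProtoMsgV2
	h.parseProtoMsg("application/x-protobuf;proto=yolo")
	// -> error from Validate, surfaced as 415 by ServeHTTP
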
func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- req, err := DecodeWriteRequest(r.Body)
+ contentType := r.Header.Get("Content-Type")
+ if contentType == "" {
+		// Don't break legacy 1.0 clients if not needed. This is similar to what we did
+ // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
+ // We could give http.StatusUnsupportedMediaType, but let's assume 1.0 message by default.
+ contentType = appProtoContentType
+ }
+
+ msg, err := h.parseProtoMsg(contentType)
+ if err != nil {
+ level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
+ http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+ return
+ }
+
+ if _, ok := h.acceptedProtoMsgs[msg]; !ok {
+ err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) {
+ for k := range h.acceptedProtoMsgs {
+ ret = append(ret, string(k))
+ }
+ return ret
+ }())
+ level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
+		http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+		return
+	}
+
+ enc := r.Header.Get("Content-Encoding")
+ if enc == "" {
+		// Don't break legacy 1.0 clients if not needed. This is similar to what we did
+ // before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
+ // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default.
+ } else if enc != string(SnappyBlockCompression) {
+ err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression)
+ level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
+		http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+		return
+	}
+
+ // Read the request body.
+ body, err := io.ReadAll(r.Body)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
- err = h.write(r.Context(), req)
+ decompressed, err := snappy.Decode(nil, body)
+ if err != nil {
+ // TODO(bwplotka): Add more context to responded error?
+ level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ // Now we have a decompressed buffer we can unmarshal it.
+ switch msg {
+ case config.RemoteWriteProtoMsgV1:
+ var req prompb.WriteRequest
+ if err := proto.Unmarshal(decompressed, &req); err != nil {
+ // TODO(bwplotka): Add more context to responded error?
+ level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ err = h.write(r.Context(), &req)
+ case config.RemoteWriteProtoMsgV2:
+ var req writev2.Request
+ if err := proto.Unmarshal(decompressed, &req); err != nil {
+ // TODO(bwplotka): Add more context to responded error?
+ level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error())
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ err = h.writeV2(r.Context(), &req)
+ }
+
switch {
case err == nil:
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
@@ -123,62 +226,27 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
}()
b := labels.NewScratchBuilder(0)
- var exemplarErr error
-
for _, ts := range req.Timeseries {
- labels := LabelProtosToLabels(&b, ts.Labels)
- if !labels.IsValid() {
- level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
+ ls := ts.ToLabels(&b, nil)
+ if !ls.IsValid() {
+ level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
samplesWithInvalidLabels++
continue
}
- var ref storage.SeriesRef
- for _, s := range ts.Samples {
- ref, err = timeLimitApp.Append(ref, labels, s.Timestamp, s.Value)
- if err != nil {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
- level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
- }
- return err
- }
+
+ err := h.appendSamples(timeLimitApp, ts.Samples, ls)
+ if err != nil {
+ return err
}
for _, ep := range ts.Exemplars {
- e := exemplarProtoToExemplar(&b, ep)
-
- _, exemplarErr = timeLimitApp.AppendExemplar(0, labels, e)
- exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
- if exemplarErr != nil {
- // Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
- level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
- }
+ e := ep.ToExemplar(&b, nil)
+ h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
}
- for _, hp := range ts.Histograms {
- if hp.IsFloatHistogram() {
- fhs := FloatHistogramProtoToFloatHistogram(hp)
- _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
- } else {
- hs := HistogramProtoToHistogram(hp)
- _, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
- }
-
- if err != nil {
- unwrappedErr := errors.Unwrap(err)
- if unwrappedErr == nil {
- unwrappedErr = err
- }
- // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
- // a note indicating its inclusion in the future.
- if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
- level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
- }
- return err
- }
+ err = h.appendHistograms(timeLimitApp, ts.Histograms, ls)
+ if err != nil {
+ return err
}
}
@@ -192,6 +260,149 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
return nil
}
+func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) {
+ outOfOrderExemplarErrs := 0
+
+ timeLimitApp := &timeLimitAppender{
+ Appender: h.appendable.Appender(ctx),
+ maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
+ }
+
+ defer func() {
+ if err != nil {
+ _ = timeLimitApp.Rollback()
+ return
+ }
+ err = timeLimitApp.Commit()
+ }()
+
+ b := labels.NewScratchBuilder(0)
+ for _, ts := range req.Timeseries {
+ ls := ts.ToLabels(&b, req.Symbols)
+
+ err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls)
+ if err != nil {
+ return err
+ }
+
+ for _, ep := range ts.Exemplars {
+ e := ep.ToExemplar(&b, req.Symbols)
+ h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
+ }
+
+ err = h.appendHistogramsV2(timeLimitApp, ts.Histograms, ls)
+ if err != nil {
+ return err
+ }
+
+ m := ts.ToMetadata(req.Symbols)
+ if _, err = timeLimitApp.UpdateMetadata(0, ls, m); err != nil {
+			level.Debug(h.logger).Log("msg", "Error while updating metadata from remote write", "err", err)
+ }
+ }
+
+ if outOfOrderExemplarErrs > 0 {
+ _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
+ }
+
+ return nil
+}
+
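For orientation, a minimal writev2.Request of the shape writeV2 consumes, mirroring the test fixtures further below: Symbols[0] must be the empty string, and LabelsRefs index into Symbols as name/value pairs (values illustrative):

	req := &writev2.Request{
		Symbols: []string{"", "__name__", "metric1"},
		Timeseries: []writev2.TimeSeries{{
			LabelsRefs: []uint32{1, 2}, // __name__="metric1"
			Samples:    []writev2.Sample{{Value: 1, Timestamp: 100}},
		}},
	}
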
+func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) {
+ _, err := app.AppendExemplar(0, labels, e)
+ err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs)
+ if err != nil {
+		// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
+ level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err)
+ }
+}
+
+func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
+ var ref storage.SeriesRef
+ var err error
+ for _, s := range ss {
+ ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
+ if err != nil {
+ unwrappedErr := errors.Unwrap(err)
+ if unwrappedErr == nil {
+ unwrappedErr = err
+ }
+ if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+ level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error {
+ var ref storage.SeriesRef
+ var err error
+ for _, s := range ss {
+ ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
+ if err != nil {
+ unwrappedErr := errors.Unwrap(err)
+ if unwrappedErr == nil {
+ unwrappedErr = err
+ }
+ if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+ level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
+ var err error
+ for _, hp := range hh {
+ if hp.IsFloatHistogram() {
+ _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
+ } else {
+ _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
+ }
+ if err != nil {
+ unwrappedErr := errors.Unwrap(err)
+ if unwrappedErr == nil {
+ unwrappedErr = err
+ }
+ // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+ // a note indicating its inclusion in the future.
+ if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+ level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error {
+ var err error
+ for _, hp := range hh {
+ if hp.IsFloatHistogram() {
+ _, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
+ } else {
+ _, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
+ }
+ if err != nil {
+ unwrappedErr := errors.Unwrap(err)
+ if unwrappedErr == nil {
+ unwrappedErr = err
+ }
+ // Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
+ // a note indicating its inclusion in the future.
+ if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
+ level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
+ }
+ return err
+ }
+ }
+ return nil
+}
+
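Not part of the patch: appendSamples/appendSamplesV2 (and the histogram pair) differ only in the slice element type. A hedged sketch of unifying the sample pair with Go generics, assuming the gogo-generated getters sit on pointer receivers; the error classification and logging above are elided:

	func appendAll[S any, PS interface {
		*S
		GetTimestamp() int64
		GetValue() float64
	}](app storage.Appender, ss []S, ls labels.Labels) error {
		var ref storage.SeriesRef
		for i := range ss {
			s := PS(&ss[i])
			var err error
			if ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()); err != nil {
				return err
			}
		}
		return nil
	}
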
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go
index 30dc1b3d6..24bd7059a 100644
--- a/storage/remote/write_handler_test.go
+++ b/storage/remote/write_handler_test.go
@@ -30,25 +30,230 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
+ "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
+ writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/util/testutil"
)
-func TestRemoteWriteHandler(t *testing.T) {
- buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
+func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) {
+ payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ for _, tc := range []struct {
+ name string
+ reqHeaders map[string]string
+ expectedCode int
+ }{
+	// Generally, the Prometheus 1.0 receiver never checked for the existence of
+	// these headers, so we keep things permissive.
+ {
+ name: "correct PRW 1.0 headers",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ name: "missing remote write version",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+ "Content-Encoding": string(SnappyBlockCompression),
+ },
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ name: "no headers",
+ reqHeaders: map[string]string{},
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ name: "missing content-type",
+ reqHeaders: map[string]string{
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ name: "missing content-encoding",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ name: "wrong content-type",
+ reqHeaders: map[string]string{
+ "Content-Type": "yolo",
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ {
+ name: "wrong content-type2",
+ reqHeaders: map[string]string{
+ "Content-Type": appProtoContentType + ";proto=yolo",
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ {
+ name: "not supported content-encoding",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
+ "Content-Encoding": "zstd",
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+ for k, v := range tc.reqHeaders {
+ req.Header.Set(k, v)
+ }
+
+ appendable := &mockAppendable{}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ out, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ _ = resp.Body.Close()
+ require.Equal(t, tc.expectedCode, resp.StatusCode, string(out))
+ })
+ }
+}
+
+func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
+ payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
require.NoError(t, err)
+ for _, tc := range []struct {
+ name string
+ reqHeaders map[string]string
+ expectedCode int
+ }{
+ {
+ name: "correct PRW 2.0 headers",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusNoContent,
+ },
+ {
+ name: "missing remote write version",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+ "Content-Encoding": string(SnappyBlockCompression),
+ },
+ expectedCode: http.StatusNoContent, // We don't check for now.
+ },
+ {
+ name: "no headers",
+ reqHeaders: map[string]string{},
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ {
+ name: "missing content-type",
+ reqHeaders: map[string]string{
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+			// This gives 415 only because we explicitly support 2.0 alone. If we supported
+			// both (the default), the body would be parsed as an empty v1 message with an OK
+			// response, which is arguably better than a 415 for a previously working 1.0
+			// flow with no content-type.
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ {
+ name: "missing content-encoding",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusNoContent, // Similar to 1.0 impl, we default to Snappy, so it works.
+ },
+ {
+ name: "wrong content-type",
+ reqHeaders: map[string]string{
+ "Content-Type": "yolo",
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ {
+ name: "wrong content-type2",
+ reqHeaders: map[string]string{
+ "Content-Type": appProtoContentType + ";proto=yolo",
+ "Content-Encoding": string(SnappyBlockCompression),
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ {
+ name: "not supported content-encoding",
+ reqHeaders: map[string]string{
+ "Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
+ "Content-Encoding": "zstd",
+ RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
+ },
+ expectedCode: http.StatusUnsupportedMediaType,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+ for k, v := range tc.reqHeaders {
+ req.Header.Set(k, v)
+ }
+
+ appendable := &mockAppendable{}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ out, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ _ = resp.Body.Close()
+ require.Equal(t, tc.expectedCode, resp.StatusCode, string(out))
+ })
+ }
+}
+
+func TestRemoteWriteHandler_V1Message(t *testing.T) {
+ payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+
+	// NOTE: Strictly speaking, even for 1.0 we require headers, but we never verified
+	// them in Prometheus, so we keep it this way to avoid breaking existing 1.0 clients.
+
appendable := &mockAppendable{}
- handler := NewWriteHandler(nil, nil, appendable)
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -61,24 +266,22 @@ func TestRemoteWriteHandler(t *testing.T) {
j := 0
k := 0
for _, ts := range writeRequestFixture.Timeseries {
- labels := LabelProtosToLabels(&b, ts.Labels)
+ labels := ts.ToLabels(&b, nil)
for _, s := range ts.Samples {
requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
-
for _, e := range ts.Exemplars {
- exemplarLabels := LabelProtosToLabels(&b, e.Labels)
+ exemplarLabels := e.ToExemplar(&b, nil).Labels
requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
-
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
- fh := FloatHistogramProtoToFloatHistogram(hp)
+ fh := hp.ToFloatHistogram()
requireEqual(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
- h := HistogramProtoToHistogram(hp)
+ h := hp.ToIntHistogram()
requireEqual(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
}
@@ -87,8 +290,66 @@ func TestRemoteWriteHandler(t *testing.T) {
}
}
-func TestOutOfOrderSample(t *testing.T) {
- tests := []struct {
+func TestRemoteWriteHandler_V2Message(t *testing.T) {
+ payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+
+ req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+ req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+ req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+ appendable := &mockAppendable{}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, http.StatusNoContent, resp.StatusCode)
+
+ b := labels.NewScratchBuilder(0)
+ i := 0
+ j := 0
+ k := 0
+ for _, ts := range writeV2RequestFixture.Timeseries {
+ ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
+
+ for _, s := range ts.Samples {
+ requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
+
+ switch i {
+ case 0:
+ requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i])
+ case 1:
+ requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i])
+ default:
+				t.Fatal("more series/samples than expected")
+ }
+ i++
+ }
+ for _, e := range ts.Exemplars {
+ exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
+ requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
+ j++
+ }
+ for _, hp := range ts.Histograms {
+ if hp.IsFloatHistogram() {
+ fh := hp.ToFloatHistogram()
+ requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
+ } else {
+ h := hp.ToIntHistogram()
+ requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
+ }
+ k++
+ }
+ }
+}
+
+func TestOutOfOrderSample_V1Message(t *testing.T) {
+ for _, tc := range []struct {
Name string
Timestamp int64
}{
@@ -100,23 +361,59 @@ func TestOutOfOrderSample(t *testing.T) {
Name: "future",
Timestamp: math.MaxInt64,
},
- }
-
- for _, tc := range tests {
+ } {
t.Run(tc.Name, func(t *testing.T) {
- buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+ payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
Samples: []prompb.Sample{{Value: 1, Timestamp: tc.Timestamp}},
- }}, nil, nil, nil, nil)
+ }}, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{
- latestSample: 100,
- }
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+ appendable := &mockAppendable{latestSample: 100}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+ })
+ }
+}
+
+func TestOutOfOrderSample_V2Message(t *testing.T) {
+ for _, tc := range []struct {
+ Name string
+ Timestamp int64
+ }{
+ {
+ Name: "historic",
+ Timestamp: 0,
+ },
+ {
+ Name: "future",
+ Timestamp: math.MaxInt64,
+ },
+ } {
+ t.Run(tc.Name, func(t *testing.T) {
+ payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
+ LabelsRefs: []uint32{1, 2},
+ Samples: []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}},
+ }}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+
+ req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+ req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+ req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+ appendable := &mockAppendable{latestSample: 100}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -128,9 +425,9 @@ func TestOutOfOrderSample(t *testing.T) {
}
// This test case currently aims to verify that the WriteHandler endpoint
-// don't fail on ingestion errors since the exemplar storage is
+// doesn't fail on exemplar ingestion errors since the exemplar storage is
// still experimental.
-func TestOutOfOrderExemplar(t *testing.T) {
+func TestOutOfOrderExemplar_V1Message(t *testing.T) {
tests := []struct {
Name string
Timestamp int64
@@ -147,19 +444,17 @@ func TestOutOfOrderExemplar(t *testing.T) {
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
- buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+ payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: tc.Timestamp}},
- }}, nil, nil, nil, nil)
+ }}, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{
- latestExemplar: 100,
- }
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+ appendable := &mockAppendable{latestExemplar: 100}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -171,7 +466,7 @@ func TestOutOfOrderExemplar(t *testing.T) {
}
}
-func TestOutOfOrderHistogram(t *testing.T) {
+func TestOutOfOrderExemplar_V2Message(t *testing.T) {
tests := []struct {
Name string
Timestamp int64
@@ -188,19 +483,58 @@ func TestOutOfOrderHistogram(t *testing.T) {
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
- buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
+ payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
+ LabelsRefs: []uint32{1, 2},
+ Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}},
+ }}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+
+ req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+ req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+ req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+ appendable := &mockAppendable{latestExemplar: 100}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ // TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
+ require.Equal(t, http.StatusNoContent, resp.StatusCode)
+ })
+ }
+}
+
+func TestOutOfOrderHistogram_V1Message(t *testing.T) {
+ for _, tc := range []struct {
+ Name string
+ Timestamp int64
+ }{
+ {
+ Name: "historic",
+ Timestamp: 0,
+ },
+ {
+ Name: "future",
+ Timestamp: math.MaxInt64,
+ },
+ } {
+ t.Run(tc.Name, func(t *testing.T) {
+ payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
- Histograms: []prompb.Histogram{HistogramToHistogramProto(tc.Timestamp, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
- }}, nil, nil, nil, nil)
+ Histograms: []prompb.Histogram{prompb.FromIntHistogram(tc.Timestamp, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
+ }}, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{
- latestHistogram: 100,
- }
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+ appendable := &mockAppendable{latestHistogram: 100}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -211,9 +545,49 @@ func TestOutOfOrderHistogram(t *testing.T) {
}
}
-func BenchmarkRemoteWritehandler(b *testing.B) {
+func TestOutOfOrderHistogram_V2Message(t *testing.T) {
+ for _, tc := range []struct {
+ Name string
+ Timestamp int64
+ }{
+ {
+ Name: "historic",
+ Timestamp: 0,
+ },
+ {
+ Name: "future",
+ Timestamp: math.MaxInt64,
+ },
+ } {
+ t.Run(tc.Name, func(t *testing.T) {
+ payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
+				LabelsRefs: []uint32{1, 2},
+				Histograms: []writev2.Histogram{writev2.FromIntHistogram(tc.Timestamp, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
+			}}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+
+ req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+ req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+ req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+ appendable := &mockAppendable{latestHistogram: 100}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+ })
+ }
+}
+
+func BenchmarkRemoteWriteHandler(b *testing.B) {
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
- reqs := []*http.Request{}
+ var reqs []*http.Request
for i := 0; i < b.N; i++ {
num := strings.Repeat(strconv.Itoa(i), 16)
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
@@ -221,8 +595,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
{Name: "__name__", Value: "test_metric"},
{Name: "test_label_name_" + num, Value: labelValue + num},
},
- Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
- }}, nil, nil, nil, nil)
+ Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram)},
+ }}, nil, nil, nil, nil, "snappy")
require.NoError(b, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
require.NoError(b, err)
@@ -230,7 +604,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
appendable := &mockAppendable{}
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+ // TODO: Test with the other supported proto format(s) (e.g. the v2 message).
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
b.ResetTimer()
@@ -239,17 +614,39 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
}
}
-func TestCommitErr(t *testing.T) {
- buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
+func TestCommitErr_V1Message(t *testing.T) {
+ payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
- req, err := http.NewRequest("", "", bytes.NewReader(buf))
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
- appendable := &mockAppendable{
- commitErr: fmt.Errorf("commit error"),
- }
- handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
+ appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
+
+ recorder := httptest.NewRecorder()
+ handler.ServeHTTP(recorder, req)
+
+ resp := recorder.Result()
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
+ require.Equal(t, "commit error\n", string(body))
+}
+
+func TestCommitErr_V2Message(t *testing.T) {
+ payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("", "", bytes.NewReader(payload))
+ require.NoError(t, err)
+
+ req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
+ req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
+ req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
+
+ appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
+ handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@@ -275,10 +672,10 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
b.Cleanup(func() {
require.NoError(b, db.Close())
})
+ // TODO: Test with the other supported proto format(s) (e.g. the v2 message).
+ handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
- handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head())
-
- buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil)
+ buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
require.NoError(b, err)
req, err := http.NewRequest("", "", bytes.NewReader(buf))
@@ -291,7 +688,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
var bufRequests [][]byte
for i := 0; i < 100; i++ {
- buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil)
+ buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
require.NoError(b, err)
bufRequests = append(bufRequests, buf)
}
@@ -328,7 +725,9 @@ type mockAppendable struct {
exemplars []mockExemplar
latestHistogram int64
histograms []mockHistogram
- commitErr error
+ metadata []mockMetadata
+
+ commitErr error
}
type mockSample struct {
@@ -351,10 +750,17 @@ type mockHistogram struct {
fh *histogram.FloatHistogram
}
+type mockMetadata struct {
+ l labels.Labels
+ m metadata.Metadata
+}
+
// Wrapper to instruct go-cmp package to compare a list of structs with unexported fields.
func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
+ t.Helper()
+
testutil.RequireEqualWithOptions(t, expected, actual,
- []cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{})},
+ []cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{}), cmp.AllowUnexported(mockMetadata{})},
msgAndArgs...)
}
@@ -400,13 +806,14 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
return 0, nil
}
-func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
- // TODO: Wire metadata in a mockAppendable field when we get around to handling metadata in remote_write.
- // UpdateMetadata is no-op for remote write (where mockAppendable is being used to test) for now.
+func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
+ m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
return 0, nil
}
func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
// AppendCTZeroSample is no-op for remote-write for now.
+ // TODO(bwplotka): Add PRW 2.0 support for the CT zero feature (though we might
+ // replace it with in-metadata CT storage; see https://github.com/prometheus/prometheus/issues/14218).
return 0, nil
}
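
Note: with `UpdateMetadata` now recording into `mockAppendable.metadata`, v2 handler tests can assert on captured metadata instead of treating it as a no-op. Roughly (the expected values here are illustrative, not taken from the fixtures):

```go
// Illustrative assertion; label and metadata values are made up.
requireEqual(t, []mockMetadata{
	{
		l: labels.FromStrings("__name__", "test_metric"),
		m: metadata.Metadata{Type: "counter", Help: "some help"},
	},
}, appendable.metadata)
```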
diff --git a/storage/remote/write_test.go b/storage/remote/write_test.go
index c79ac3ab7..648ec4b17 100644
--- a/storage/remote/write_test.go
+++ b/storage/remote/write_test.go
@@ -15,6 +15,7 @@ package remote
import (
"bytes"
+ "errors"
"net/http"
"net/http/httptest"
"net/url"
@@ -43,11 +44,12 @@ func testRemoteWriteConfig() *config.RemoteWriteConfig {
Host: "localhost",
},
},
- QueueConfig: config.DefaultQueueConfig,
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
}
-func TestNoDuplicateWriteConfigs(t *testing.T) {
+func TestWriteStorageApplyConfig_NoDuplicateWriteConfigs(t *testing.T) {
dir := t.TempDir()
cfg1 := config.RemoteWriteConfig{
@@ -58,7 +60,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
Host: "localhost",
},
},
- QueueConfig: config.DefaultQueueConfig,
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
cfg2 := config.RemoteWriteConfig{
Name: "write-2",
@@ -68,7 +71,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
Host: "localhost",
},
},
- QueueConfig: config.DefaultQueueConfig,
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
cfg3 := config.RemoteWriteConfig{
URL: &common_config.URL{
@@ -77,61 +81,49 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
Host: "localhost",
},
},
- QueueConfig: config.DefaultQueueConfig,
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
- type testcase struct {
- cfgs []*config.RemoteWriteConfig
- err bool
- }
-
- cases := []testcase{
+ for _, tc := range []struct {
+ cfgs []*config.RemoteWriteConfig
+ expectedErr error
+ }{
{ // Two duplicates, we should get an error.
- cfgs: []*config.RemoteWriteConfig{
- &cfg1,
- &cfg1,
- },
- err: true,
+ cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg1},
+ expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"),
},
{ // Duplicates but with different names, we should not get an error.
- cfgs: []*config.RemoteWriteConfig{
- &cfg1,
- &cfg2,
- },
- err: false,
+ cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg2},
},
{ // Duplicates but one with no name, we should not get an error.
- cfgs: []*config.RemoteWriteConfig{
- &cfg1,
- &cfg3,
- },
- err: false,
+ cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg3},
},
{ // Duplicates both with no name, we should get an error.
- cfgs: []*config.RemoteWriteConfig{
- &cfg3,
- &cfg3,
- },
- err: true,
+ cfgs: []*config.RemoteWriteConfig{&cfg3, &cfg3},
+ expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"),
},
- }
-
- for _, tc := range cases {
- s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil)
- conf := &config.Config{
- GlobalConfig: config.DefaultGlobalConfig,
- RemoteWriteConfigs: tc.cfgs,
- }
- err := s.ApplyConfig(conf)
- gotError := err != nil
- require.Equal(t, tc.err, gotError)
-
- err = s.Close()
- require.NoError(t, err)
+ } {
+ t.Run("", func(t *testing.T) {
+ s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
+ conf := &config.Config{
+ GlobalConfig: config.DefaultGlobalConfig,
+ RemoteWriteConfigs: tc.cfgs,
+ }
+ err := s.ApplyConfig(conf)
+ if tc.expectedErr == nil {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ require.Equal(t, tc.expectedErr, err)
+ }
+
+ require.NoError(t, s.Close())
+ })
}
}
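
Note: every `RemoteWriteConfig` built in these tests now sets `ProtobufMessage` explicitly, presumably because Go-constructed configs bypass the YAML defaulting that would otherwise fill it in. A minimal sketch:

```go
// Sketch: pin the message type explicitly when building configs in Go
// (V1 mirrors the pre-2.0 behavior).
cfg := &config.RemoteWriteConfig{
	URL: &common_config.URL{
		URL: &url.URL{Scheme: "http", Host: "localhost"},
	},
	QueueConfig:     config.DefaultQueueConfig,
	ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
```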
-func TestRestartOnNameChange(t *testing.T) {
+func TestWriteStorageApplyConfig_RestartOnNameChange(t *testing.T) {
dir := t.TempDir()
cfg := testRemoteWriteConfig()
@@ -139,13 +131,11 @@ func TestRestartOnNameChange(t *testing.T) {
hash, err := toHash(cfg)
require.NoError(t, err)
- s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil)
+ s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
conf := &config.Config{
- GlobalConfig: config.DefaultGlobalConfig,
- RemoteWriteConfigs: []*config.RemoteWriteConfig{
- cfg,
- },
+ GlobalConfig: config.DefaultGlobalConfig,
+ RemoteWriteConfigs: []*config.RemoteWriteConfig{cfg},
}
require.NoError(t, s.ApplyConfig(conf))
require.Equal(t, s.queues[hash].client().Name(), cfg.Name)
@@ -157,14 +147,13 @@ func TestRestartOnNameChange(t *testing.T) {
require.NoError(t, err)
require.Equal(t, s.queues[hash].client().Name(), conf.RemoteWriteConfigs[0].Name)
- err = s.Close()
- require.NoError(t, err)
+ require.NoError(t, s.Close())
}
-func TestUpdateWithRegisterer(t *testing.T) {
+func TestWriteStorageApplyConfig_UpdateWithRegisterer(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil)
+ s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false)
c1 := &config.RemoteWriteConfig{
Name: "named",
URL: &common_config.URL{
@@ -173,7 +162,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
Host: "localhost",
},
},
- QueueConfig: config.DefaultQueueConfig,
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
c2 := &config.RemoteWriteConfig{
URL: &common_config.URL{
@@ -182,7 +172,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
Host: "localhost",
},
},
- QueueConfig: config.DefaultQueueConfig,
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
@@ -197,14 +188,13 @@ func TestUpdateWithRegisterer(t *testing.T) {
require.Equal(t, 10, queue.cfg.MaxShards)
}
- err := s.Close()
- require.NoError(t, err)
+ require.NoError(t, s.Close())
}
-func TestWriteStorageLifecycle(t *testing.T) {
+func TestWriteStorageApplyConfig_Lifecycle(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -214,14 +204,13 @@ func TestWriteStorageLifecycle(t *testing.T) {
require.NoError(t, s.ApplyConfig(conf))
require.Len(t, s.queues, 1)
- err := s.Close()
- require.NoError(t, err)
+ require.NoError(t, s.Close())
}
-func TestUpdateExternalLabels(t *testing.T) {
+func TestWriteStorageApplyConfig_UpdateExternalLabels(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil)
+ s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false)
externalLabels := labels.FromStrings("external", "true")
conf := &config.Config{
@@ -243,15 +232,13 @@ func TestUpdateExternalLabels(t *testing.T) {
require.Len(t, s.queues, 1)
require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels)
- err = s.Close()
- require.NoError(t, err)
+ require.NoError(t, s.Close())
}
-func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
+func TestWriteStorageApplyConfig_Idempotent(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
-
+ s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.GlobalConfig{},
RemoteWriteConfigs: []*config.RemoteWriteConfig{
@@ -269,14 +256,13 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
_, hashExists := s.queues[hash]
require.True(t, hashExists, "Queue pointer should have remained the same")
- err = s.Close()
- require.NoError(t, err)
+ require.NoError(t, s.Close())
}
-func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
+func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
dir := t.TempDir()
- s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
+ s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
c0 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(10 * time.Second),
@@ -286,6 +272,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
Regex: relabel.MustNewRegexp(".+"),
},
},
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
c1 := &config.RemoteWriteConfig{
RemoteTimeout: model.Duration(20 * time.Second),
@@ -293,10 +280,12 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
HTTPClientConfig: common_config.HTTPClientConfig{
BearerToken: "foo",
},
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
c2 := &config.RemoteWriteConfig{
- RemoteTimeout: model.Duration(30 * time.Second),
- QueueConfig: config.DefaultQueueConfig,
+ RemoteTimeout: model.Duration(30 * time.Second),
+ QueueConfig: config.DefaultQueueConfig,
+ ProtobufMessage: config.RemoteWriteProtoMsgV1,
}
conf := &config.Config{
@@ -376,8 +365,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
_, hashExists = s.queues[hashes[2]]
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
- err = s.Close()
- require.NoError(t, err)
+ require.NoError(t, s.Close())
}
func TestOTLPWriteHandler(t *testing.T) {
diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go
index b984e6bc0..b31041b1b 100644
--- a/tsdb/agent/db_test.go
+++ b/tsdb/agent/db_test.go
@@ -89,7 +89,7 @@ func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) *
t.Helper()
dbDir := t.TempDir()
- rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil)
+ rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
@@ -585,7 +585,7 @@ func TestLockfile(t *testing.T) {
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
logger := log.NewNopLogger()
reg := prometheus.NewRegistry()
- rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil)
+ rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false)
t.Cleanup(func() {
require.NoError(t, rs.Close())
})
@@ -605,7 +605,7 @@ func TestLockfile(t *testing.T) {
func Test_ExistingWAL_NextRef(t *testing.T) {
dbDir := t.TempDir()
- rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil)
+ rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
defer func() {
require.NoError(t, rs.Close())
}()
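
Note: the new trailing bool on `remote.NewStorage` (and `NewWriteStorage`) mirrors `cfg.scrape.AppendMetadata` from main.go; these tests all pass `false`. A sketch of an enabled call site (`dir` and `startTime` as in the tests above):

```go
// Pass true only when the metadata-wal-records feature flag is enabled.
rs := remote.NewStorage(log.NewNopLogger(), prometheus.NewRegistry(),
	startTime, dir, 30*time.Second, nil, true)
defer func() { _ = rs.Close() }()
```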
diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go
index 8ebd9249a..3d74a551d 100644
--- a/tsdb/wlog/watcher.go
+++ b/tsdb/wlog/watcher.go
@@ -57,6 +57,7 @@ type WriteTo interface {
AppendHistograms([]record.RefHistogramSample) bool
AppendFloatHistograms([]record.RefFloatHistogramSample) bool
StoreSeries([]record.RefSeries, int)
+ StoreMetadata([]record.RefMetadata)
// Next two methods are intended for garbage-collection: first we call
// UpdateSeriesSegment on all current series
@@ -88,6 +89,7 @@ type Watcher struct {
lastCheckpoint string
sendExemplars bool
sendHistograms bool
+ sendMetadata bool
metrics *WatcherMetrics
readerMetrics *LiveReaderMetrics
@@ -170,7 +172,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
}
// NewWatcher creates a new WAL watcher for a given WriteTo.
-func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms bool) *Watcher {
+func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher {
if logger == nil {
logger = log.NewNopLogger()
}
@@ -183,6 +185,7 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
name: name,
sendExemplars: sendExemplars,
sendHistograms: sendHistograms,
+ sendMetadata: sendMetadata,
readNotify: make(chan struct{}),
quit: make(chan struct{}),
@@ -541,6 +544,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
histogramsToSend []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
floatHistogramsToSend []record.RefFloatHistogramSample
+ metadata []record.RefMetadata
)
for r.Next() && !isClosed(w.quit) {
rec := r.Record()
@@ -652,6 +656,17 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
w.writer.AppendFloatHistograms(floatHistogramsToSend)
floatHistogramsToSend = floatHistogramsToSend[:0]
}
+
+ case record.Metadata:
+ if !w.sendMetadata || !tail {
+ break
+ }
+ meta, err := dec.Metadata(rec, metadata[:0])
+ if err != nil {
+ w.recordDecodeFailsMetric.Inc()
+ return err
+ }
+ w.writer.StoreMetadata(meta)
case record.Tombstones:
default:
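
Note: `StoreMetadata` extends the `WriteTo` contract, and the watcher only decodes metadata records when `sendMetadata` is set and it is tailing the live segment. A minimal sketch of a consumer (`metadataSink` is hypothetical; `record.RefMetadata` keys on a series ref):

```go
// Hypothetical WriteTo consumer keeping the latest metadata per series.
// Assumes imports: sync, tsdb/chunks, tsdb/record.
type metadataSink struct {
	mtx  sync.Mutex
	meta map[chunks.HeadSeriesRef]record.RefMetadata
}

func (s *metadataSink) StoreMetadata(ms []record.RefMetadata) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	for _, m := range ms {
		s.meta[m.Ref] = m // last write wins, in WAL order
	}
}
```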
diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go
index ff006cb81..824010f30 100644
--- a/tsdb/wlog/watcher_test.go
+++ b/tsdb/wlog/watcher_test.go
@@ -92,6 +92,8 @@ func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) {
wtm.UpdateSeriesSegment(series, index)
}
+func (wtm *writeToMock) StoreMetadata(_ []record.RefMetadata) { /* no-op */ }
+
func (wtm *writeToMock) UpdateSeriesSegment(series []record.RefSeries, index int) {
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()
@@ -219,7 +221,7 @@ func TestTailSamples(t *testing.T) {
require.NoError(t, err)
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true, true)
watcher.SetStartTime(now)
// Set the Watcher's metrics so they're not nil pointers.
@@ -304,7 +306,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
require.NoError(t, err)
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
expected := seriesCount
@@ -393,7 +395,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
require.NoError(t, err)
readTimeout = time.Second
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
expected := seriesCount * 2
@@ -464,7 +466,7 @@ func TestReadCheckpoint(t *testing.T) {
require.NoError(t, err)
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
expectedSeries := seriesCount
@@ -533,7 +535,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
}
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1
// Set the Watcher's metrics so they're not nil pointers.
@@ -606,7 +608,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
readTimeout = time.Second
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1
go watcher.Start()
@@ -685,7 +687,7 @@ func TestRun_StartupTime(t *testing.T) {
require.NoError(t, w.Close())
wt := newWriteToMock(0)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segments
watcher.setMetrics()
@@ -774,7 +776,7 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
}()
wt := newWriteToMock(time.Millisecond)
- watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
+ watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segments
watcher.setMetrics()
diff --git a/web/api/v1/api.go b/web/api/v1/api.go
index b95ff25cf..c93892f00 100644
--- a/web/api/v1/api.go
+++ b/web/api/v1/api.go
@@ -248,6 +248,7 @@ func NewAPI(
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
rwEnabled bool,
+ acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
otlpEnabled bool,
) *API {
a := &API{
@@ -290,7 +291,7 @@ func NewAPI(
}
if rwEnabled {
- a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap)
+ a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)
diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go
index 74cd2239d..9eb7d08c3 100644
--- a/web/api/v1/api_test.go
+++ b/web/api/v1/api_test.go
@@ -455,7 +455,7 @@ func TestEndpoints(t *testing.T) {
remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil
- }, dbDir, 1*time.Second, nil)
+ }, dbDir, 1*time.Second, nil, false)
err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{
diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go
index e76a1a3d3..a83bfe017 100644
--- a/web/api/v1/errors_test.go
+++ b/web/api/v1/errors_test.go
@@ -135,6 +135,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
nil,
nil,
false,
+ config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2},
false,
)
diff --git a/web/web.go b/web/web.go
index a87759fb2..9426ed935 100644
--- a/web/web.go
+++ b/web/web.go
@@ -265,6 +265,8 @@ type Options struct {
IsAgent bool
AppName string
+ AcceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg
+
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
}
@@ -353,6 +355,7 @@ func New(logger log.Logger, o *Options) *Handler {
o.Registerer,
nil,
o.EnableRemoteWriteReceiver,
+ o.AcceptRemoteWriteProtoMsgs,
o.EnableOTLPWriteReceiver,
)