// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	config_util "github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/sigv4"
	"github.com/prometheus/common/version"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"

	"github.com/prometheus/prometheus/prompb"
)
const maxErrMsgLen = 1024

// UserAgent is the User-Agent header value sent with every remote read and
// write request.
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
var (
	remoteReadQueriesTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "read_queries_total",
			Help:      "The total number of remote read queries.",
		},
		[]string{remoteName, endpoint, "code"},
	)
	remoteReadQueries = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "remote_read_queries",
			Help:      "The number of in-flight remote read queries.",
		},
		[]string{remoteName, endpoint},
	)
	remoteReadQueryDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "read_request_duration_seconds",
			Help:      "Histogram of the latency for remote read requests.",
			Buckets:   append(prometheus.DefBuckets, 25, 60),
		},
		[]string{remoteName, endpoint},
	)
)

func init() {
	prometheus.MustRegister(remoteReadQueriesTotal, remoteReadQueries, remoteReadQueryDuration)
}
// Client allows reading and writing from/to a remote HTTP endpoint.
type Client struct {
	remoteName       string // Used to differentiate clients in metrics.
	url              *config_util.URL
	Client           *http.Client
	timeout          time.Duration
	retryOnRateLimit bool

	readQueries         prometheus.Gauge
	readQueriesTotal    *prometheus.CounterVec
	readQueriesDuration prometheus.Observer
}
// ClientConfig configures a client.
type ClientConfig struct {
	URL              *config_util.URL
	Timeout          model.Duration
	HTTPClientConfig config_util.HTTPClientConfig
	SigV4Config      *sigv4.SigV4Config
	Headers          map[string]string
	RetryOnRateLimit bool
}
// ReadClient uses the SAMPLES method of remote read to read series samples from a remote server.
// TODO(bwplotka): Add streamed chunked remote read method as well (https://github.com/prometheus/prometheus/issues/5926).
type ReadClient interface {
	Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error)
}
// NewReadClient creates a new client for remote read.
func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) {
	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client")
	if err != nil {
		return nil, err
	}

	t := httpClient.Transport
	if len(conf.Headers) > 0 {
		t = newInjectHeadersRoundTripper(conf.Headers, t)
	}
	httpClient.Transport = otelhttp.NewTransport(t)

	return &Client{
		remoteName:          name,
		url:                 conf.URL,
		Client:              httpClient,
		timeout:             time.Duration(conf.Timeout),
		readQueries:         remoteReadQueries.WithLabelValues(name, conf.URL.String()),
		readQueriesTotal:    remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}),
		readQueriesDuration: remoteReadQueryDuration.WithLabelValues(name, conf.URL.String()),
	}, nil
}
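
// A minimal usage sketch for NewReadClient, assuming a parsed *url.URL u, a
// context ctx and a built *prompb.Query q (all hypothetical, not defined in
// this file):
//
//	c, err := NewReadClient("example-read", &ClientConfig{
//		URL:     &config_util.URL{URL: u},
//		Timeout: model.Duration(1 * time.Minute),
//	})
//	if err != nil {
//		// handle invalid configuration
//	}
//	res, err := c.Read(ctx, q)
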
// NewWriteClient creates a new client for remote write.
func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
	httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client")
	if err != nil {
		return nil, err
	}
	t := httpClient.Transport

	if conf.SigV4Config != nil {
		t, err = sigv4.NewSigV4RoundTripper(conf.SigV4Config, httpClient.Transport)
		if err != nil {
			return nil, err
		}
	}

	if len(conf.Headers) > 0 {
		t = newInjectHeadersRoundTripper(conf.Headers, t)
	}

	httpClient.Transport = otelhttp.NewTransport(t)

	return &Client{
		remoteName:       name,
		url:              conf.URL,
		Client:           httpClient,
		retryOnRateLimit: conf.RetryOnRateLimit,
		timeout:          time.Duration(conf.Timeout),
	}, nil
}
func newInjectHeadersRoundTripper(h map[string]string, underlyingRT http.RoundTripper) *injectHeadersRoundTripper {
	return &injectHeadersRoundTripper{headers: h, RoundTripper: underlyingRT}
}

// injectHeadersRoundTripper sets the configured headers on every outgoing
// request before delegating to the embedded http.RoundTripper.
type injectHeadersRoundTripper struct {
	headers map[string]string
	http.RoundTripper
}

func (t *injectHeadersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	for key, value := range t.headers {
		req.Header.Set(key, value)
	}
	return t.RoundTripper.RoundTrip(req)
}
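
// A hedged usage sketch: wrapping http.DefaultTransport so that every request
// carries an extra header (the header name and value here are hypothetical):
//
//	rt := newInjectHeadersRoundTripper(
//		map[string]string{"X-Scope-OrgID": "tenant-1"},
//		http.DefaultTransport,
//	)
//	client := &http.Client{Transport: rt}
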
const defaultBackoff = 0

// RecoverableError wraps an error that the caller may retry, optionally
// carrying the backoff hinted by a Retry-After header.
type RecoverableError struct {
	error
	retryAfter model.Duration
}
// Store sends a batch of samples to the HTTP endpoint; the request body is the
// proto-marshalled and snappy-encoded bytes produced in codec.go.
func (c *Client) Store(ctx context.Context, req []byte) error {
	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
	if err != nil {
		// Errors from NewRequest are from unparsable URLs, so are not
		// recoverable.
		return err
	}

	httpReq.Header.Add("Content-Encoding", "snappy")
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("User-Agent", UserAgent)
	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	ctx, span := otel.Tracer("").Start(ctx, "Remote Store", trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
	if err != nil {
		// Errors from Client.Do are from (for example) network errors, so are
		// recoverable.
		return RecoverableError{err, defaultBackoff}
	}
	defer func() {
		io.Copy(io.Discard, httpResp.Body)
		httpResp.Body.Close()
	}()

	if httpResp.StatusCode/100 != 2 {
		scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
		line := ""
		if scanner.Scan() {
			line = scanner.Text()
		}
		err = errors.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
	}
	if httpResp.StatusCode/100 == 5 {
		return RecoverableError{err, defaultBackoff}
	}
	if c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests {
		return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
	}
	return err
}
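
// A hedged write-path sketch, assuming a write client c and a slice of
// prompb.TimeSeries ts (both hypothetical; in Prometheus proper the payload is
// built by codec.go): marshal a WriteRequest, snappy-compress it, and hand the
// raw bytes to Store, retrying only recoverable errors:
//
//	data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: ts})
//	if err != nil {
//		// handle marshalling error
//	}
//	err = c.Store(ctx, snappy.Encode(nil, data))
//	if _, ok := err.(RecoverableError); ok {
//		// back off and retry; other errors should not be retried
//	}
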
// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it
// returns the defaultBackoff as if the header was never supplied.
func retryAfterDuration(t string) model.Duration {
	parsedDuration, err := time.Parse(http.TimeFormat, t)
	if err == nil {
		s := time.Until(parsedDuration).Seconds()
		return model.Duration(s) * model.Duration(time.Second)
	}
	// The duration can be in seconds.
	d, err := strconv.Atoi(t)
	if err != nil {
		return defaultBackoff
	}
	return model.Duration(d) * model.Duration(time.Second)
}
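
// Both forms of the Retry-After header are handled; the values below are
// illustrative, not from a real response:
//
//	retryAfterDuration("120")                           // 2m0s
//	retryAfterDuration("Fri, 01 Jan 2100 00:00:00 GMT") // time until that date, in whole seconds
//	retryAfterDuration("not-a-duration")                // defaultBackoff (0)
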
// Name uniquely identifies the client.
func (c Client) Name() string {
	return c.remoteName
}

// Endpoint is the remote read or write endpoint.
func (c Client) Endpoint() string {
	return c.url.String()
}
// Read reads from a remote endpoint.
func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) {
	c.readQueries.Inc()
	defer c.readQueries.Dec()

	req := &prompb.ReadRequest{
		// TODO: Support batching multiple queries into one read request,
		// as the protobuf interface allows for it.
		Queries: []*prompb.Query{
			query,
		},
	}
	data, err := proto.Marshal(req)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to marshal read request")
	}

	compressed := snappy.Encode(nil, data)
	httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed))
	if err != nil {
		return nil, errors.Wrap(err, "unable to create request")
	}
	httpReq.Header.Add("Content-Encoding", "snappy")
	httpReq.Header.Add("Accept-Encoding", "snappy")
	httpReq.Header.Set("Content-Type", "application/x-protobuf")
	httpReq.Header.Set("User-Agent", UserAgent)
	httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0")

	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	ctx, span := otel.Tracer("").Start(ctx, "Remote Read", trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	start := time.Now()
	httpResp, err := c.Client.Do(httpReq.WithContext(ctx))
	if err != nil {
		return nil, errors.Wrap(err, "error sending request")
	}
	defer func() {
		io.Copy(io.Discard, httpResp.Body)
		httpResp.Body.Close()
	}()
	c.readQueriesDuration.Observe(time.Since(start).Seconds())
	c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc()

	compressed, err = io.ReadAll(httpResp.Body)
	if err != nil {
		return nil, errors.Wrap(err, fmt.Sprintf("error reading response. HTTP status code: %s", httpResp.Status))
	}

	if httpResp.StatusCode/100 != 2 {
		return nil, errors.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed)))
	}

	uncompressed, err := snappy.Decode(nil, compressed)
	if err != nil {
		return nil, errors.Wrap(err, "error reading response")
	}

	var resp prompb.ReadResponse
	err = proto.Unmarshal(uncompressed, &resp)
	if err != nil {
		return nil, errors.Wrap(err, "unable to unmarshal response body")
	}

	if len(resp.Results) != len(req.Queries) {
		return nil, errors.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results))
	}

	return resp.Results[0], nil
}
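
// A hedged sketch of building a query for Read (the matcher value and the
// time.Time values start and end are illustrative assumptions):
//
//	q := &prompb.Query{
//		StartTimestampMs: start.UnixNano() / int64(time.Millisecond),
//		EndTimestampMs:   end.UnixNano() / int64(time.Millisecond),
//		Matchers: []*prompb.LabelMatcher{
//			{Type: prompb.LabelMatcher_EQ, Name: "__name__", Value: "up"},
//		},
//	}
//	result, err := readClient.Read(ctx, q)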