// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/prometheus/model/exemplar"
	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage"
	otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)
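
// writeHandler accepts Prometheus remote write requests over HTTP and appends
// the decoded samples, exemplars and histograms to the configured appendable.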
type writeHandler struct {
	logger     log.Logger
	appendable storage.Appendable

	samplesWithInvalidLabelsTotal prometheus.Counter
}

// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
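//
// A minimal usage sketch (hypothetical wiring; the route, logger and db values
// are assumptions, not taken from this file):
//
//	handler := NewWriteHandler(logger, prometheus.DefaultRegisterer, db)
//	http.Handle("/api/v1/write", handler)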
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
	h := &writeHandler{
		logger:     logger,
		appendable: appendable,

		samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "prometheus",
			Subsystem: "api",
			Name:      "remote_write_invalid_labels_samples_total",
			Help:      "The total number of remote write samples which contain invalid labels.",
		}),
	}
	if reg != nil {
		reg.MustRegister(h.samplesWithInvalidLabelsTotal)
	}
	return h
}
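
// ServeHTTP implements http.Handler. Decoding failures and out-of-order data
// are answered with 400 Bad Request, other append errors with 500 Internal
// Server Error, and a successful write with 204 No Content.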
func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	req, err := DecodeWriteRequest(r.Body)
	if err != nil {
		level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	err = h.write(r.Context(), req)
	switch err {
	case nil:
	case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
		// Indicate an out-of-order sample as a bad request to prevent retries.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	default:
		level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}

// checkAppendExemplarError modifies the error returned by AppendExemplar based on the error's cause.
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error {
	unwrappedErr := errors.Unwrap(err)
	if unwrappedErr == nil {
		unwrappedErr = err
	}
	switch {
	case errors.Is(unwrappedErr, storage.ErrNotFound):
		return storage.ErrNotFound
	case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar):
		*outOfOrderErrs++
		level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
		return nil
	default:
		return err
	}
}
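
// write appends the samples, exemplars and histograms in req through a single
// appender, committing on success and rolling back on the first sample or
// histogram error. Exemplar errors are only logged and do not fail the request.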
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
	outOfOrderExemplarErrs := 0
	samplesWithInvalidLabels := 0

	app := h.appendable.Appender(ctx)
	defer func() {
		if err != nil {
			_ = app.Rollback()
			return
		}
		err = app.Commit()
	}()

	var exemplarErr error
	for _, ts := range req.Timeseries {
		labels := labelProtosToLabels(ts.Labels)
		if !labels.IsValid() {
			level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
			samplesWithInvalidLabels++
			continue
		}
		var ref storage.SeriesRef
		for _, s := range ts.Samples {
			ref, err = app.Append(ref, labels, s.Timestamp, s.Value)
			if err != nil {
				unwrappedErr := errors.Unwrap(err)
				if unwrappedErr == nil {
					unwrappedErr = err
				}
				if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
					level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
				}
				return err
			}
		}

		for _, ep := range ts.Exemplars {
			e := exemplarProtoToExemplar(ep)

			_, exemplarErr = app.AppendExemplar(0, labels, e)
			exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
			if exemplarErr != nil {
				// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
				level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
			}
		}

		for _, hp := range ts.Histograms {
			if hp.IsFloatHistogram() {
				fhs := FloatHistogramProtoToFloatHistogram(hp)
				_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
			} else {
				hs := HistogramProtoToHistogram(hp)
				_, err = app.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
			}
			if err != nil {
				unwrappedErr := errors.Unwrap(err)
				if unwrappedErr == nil {
					unwrappedErr = err
				}
				// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp, there is
				// a note indicating its inclusion in the future.
				if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
					level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
				}
				return err
			}
		}
	}

	if outOfOrderExemplarErrs > 0 {
		_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	if samplesWithInvalidLabels > 0 {
		h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
	}

	return nil
}

// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
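//
// A minimal usage sketch (hypothetical wiring; the route, logger and db values
// are assumptions, not taken from this file):
//
//	http.Handle("/api/v1/otlp/v1/metrics", NewOTLPWriteHandler(logger, db))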
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
	rwHandler := &writeHandler{
		logger:     logger,
		appendable: appendable,
	}

	return &otlpWriteHandler{
		logger:    logger,
		rwHandler: rwHandler,
	}
}
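
// otlpWriteHandler translates OTLP write requests and delegates the resulting
// time series to a writeHandler.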
type otlpWriteHandler struct {
	logger    log.Logger
	rwHandler *writeHandler
}
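
// ServeHTTP decodes an OTLP metrics request, translates it to Prometheus time
// series via the otlptranslator package, and appends the result through the
// shared remote write path.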
func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	req, err := DecodeOTLPWriteRequest(r)
	if err != nil {
		level.Error(h.logger).Log("msg", "Error decoding OTLP write request", "err", err.Error())
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{})
	if errs != nil {
		level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", errs)
	}

	prwMetrics := make([]prompb.TimeSeries, 0, len(prwMetricsMap))

	for _, ts := range prwMetricsMap {
		prwMetrics = append(prwMetrics, *ts)
	}

	err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{
		Timeseries: prwMetrics,
	})

	switch err {
	case nil:
	case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp:
		// Indicate an out-of-order sample as a bad request to prevent retries.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	default:
		level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusOK)
}