// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheusremotewrite/metrics_to_prw.go
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.

package prometheusremotewrite

import (
	"errors"
	"fmt"
	"sort"
	"strconv"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.uber.org/multierr"

	"github.com/prometheus/prometheus/prompb"
	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)
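
// Settings configures the conversion from OTel metrics to Prometheus remote
// write series: namespace prefixing, extra labels, target info, created-metric
// export, metric name suffixes, and metadata handling.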
type Settings struct {
	Namespace           string
	ExternalLabels      map[string]string
	DisableTargetInfo   bool
	ExportCreatedMetric bool
	AddMetricSuffixes   bool
	SendMetadata        bool
}

// FromMetrics converts pmetric.Metrics to Prometheus remote write format.
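// Errors are accumulated per metric, so a non-nil error can still come with a
// partially populated result map. A minimal usage sketch (md is assumed to be
// an already-populated pmetric.Metrics value):
//
//	tsMap, err := FromMetrics(md, Settings{AddMetricSuffixes: true})
//	if err != nil {
//		log.Println("some metrics could not be converted:", err)
//	}
//	for _, ts := range tsMap {
//		_ = ts // send ts to the remote write endpoint
//	}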
func FromMetrics(md pmetric.Metrics, settings Settings) (map[string]*prompb.TimeSeries, error) {
	c := newPrometheusConverter()
	errs := c.fromMetrics(md, settings)
	tss := c.timeSeries()
	out := make(map[string]*prompb.TimeSeries, len(tss))
	for i := range tss {
		out[strconv.Itoa(i)] = &tss[i]
	}

	return out, errs
}

// prometheusConverter converts from OTel format to the Prometheus remote write format.
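// Series are deduplicated by a hash of their label set: unique holds one series
// per hash, while conflicts keeps any series whose distinct label sets collide
// on the same hash value.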
type prometheusConverter struct {
	unique    map[uint64]*prompb.TimeSeries
	conflicts map[uint64][]*prompb.TimeSeries
}
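
// newPrometheusConverter returns a prometheusConverter with empty series maps,
// ready to accumulate converted time series.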
func newPrometheusConverter() *prometheusConverter {
	return &prometheusConverter{
		unique:    map[uint64]*prompb.TimeSeries{},
		conflicts: map[uint64][]*prompb.TimeSeries{},
	}
}

// fromMetrics converts pmetric.Metrics to Prometheus remote write format.
func (c *prometheusConverter) fromMetrics(md pmetric.Metrics, settings Settings) (errs error) {
	resourceMetricsSlice := md.ResourceMetrics()
	for i := 0; i < resourceMetricsSlice.Len(); i++ {
		resourceMetrics := resourceMetricsSlice.At(i)
		resource := resourceMetrics.Resource()
		scopeMetricsSlice := resourceMetrics.ScopeMetrics()
		// keep track of the most recent timestamp in the ResourceMetrics for
		// use with the "target" info metric
		var mostRecentTimestamp pcommon.Timestamp
		for j := 0; j < scopeMetricsSlice.Len(); j++ {
			metricSlice := scopeMetricsSlice.At(j).Metrics()

			// TODO: decide if instrumentation library information should be exported as labels
			for k := 0; k < metricSlice.Len(); k++ {
				metric := metricSlice.At(k)
				mostRecentTimestamp = max(mostRecentTimestamp, mostRecentTimestampInMetric(metric))

				if !isValidAggregationTemporality(metric) {
					errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
					continue
				}

				promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes)

				// handle individual metrics based on type
				//exhaustive:enforce
				switch metric.Type() {
				case pmetric.MetricTypeGauge:
					dataPoints := metric.Gauge().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addGaugeNumberDataPoints(dataPoints, resource, settings, promName)
				case pmetric.MetricTypeSum:
					dataPoints := metric.Sum().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addSumNumberDataPoints(dataPoints, resource, metric, settings, promName)
				case pmetric.MetricTypeHistogram:
					dataPoints := metric.Histogram().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addHistogramDataPoints(dataPoints, resource, settings, promName)
				case pmetric.MetricTypeExponentialHistogram:
					dataPoints := metric.ExponentialHistogram().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					errs = multierr.Append(errs, c.addExponentialHistogramDataPoints(
						dataPoints,
						resource,
						settings,
						promName,
					))
				case pmetric.MetricTypeSummary:
					dataPoints := metric.Summary().DataPoints()
					if dataPoints.Len() == 0 {
						errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name()))
						break
					}
					c.addSummaryDataPoints(dataPoints, resource, settings, promName)
				default:
					errs = multierr.Append(errs, errors.New("unsupported metric type"))
				}
			}
		}
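		// Emit the "target" info series for this resource (unless disabled via
		// settings), stamped with the most recent timestamp observed above.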
		addResourceTargetInfo(resource, settings, mostRecentTimestamp, c)
	}

	return
}

// timeSeries returns a slice of the prompb.TimeSeries that were converted from OTel format.
func (c *prometheusConverter) timeSeries() []prompb.TimeSeries {
	conflicts := 0
	for _, ts := range c.conflicts {
		conflicts += len(ts)
	}
	allTS := make([]prompb.TimeSeries, 0, len(c.unique)+conflicts)
	for _, ts := range c.unique {
		allTS = append(allTS, *ts)
	}
	for _, cTS := range c.conflicts {
		for _, ts := range cTS {
			allTS = append(allTS, *ts)
		}
	}

	return allTS
}
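
// isSameMetric reports whether ts carries exactly the labels in lbls, with
// matching names and values in the same order.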
func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool {
	if len(ts.Labels) != len(lbls) {
		return false
	}
	for i, l := range ts.Labels {
		if l.Name != lbls[i].Name || l.Value != lbls[i].Value {
			return false
		}
	}
	return true
}

// addExemplars adds exemplars for the dataPoint. For each exemplar, if it can find a bucket bound
// corresponding to its value, the exemplar is added to the bucket bound's time series, provided
// that the time series has samples.
func (c *prometheusConverter) addExemplars(dataPoint pmetric.HistogramDataPoint, bucketBounds []bucketBoundsData) {
	if len(bucketBounds) == 0 {
		return
	}

	exemplars := getPromExemplars(dataPoint)
	if len(exemplars) == 0 {
		return
	}
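
	// Sort the bounds ascending so each exemplar is attached to the first
	// (smallest) bucket whose upper bound still contains its value.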
	sort.Sort(byBucketBoundsData(bucketBounds))
	for _, exemplar := range exemplars {
		for _, bound := range bucketBounds {
			if len(bound.ts.Samples) > 0 && exemplar.Value <= bound.bound {
				bound.ts.Exemplars = append(bound.ts.Exemplars, exemplar)
				break
			}
		}
	}
}

// addSample finds a TimeSeries that corresponds to lbls, and adds sample to it.
// If there is no corresponding TimeSeries already, it's created.
// The corresponding TimeSeries is returned.
// If either lbls is nil/empty or sample is nil, nothing is done.
func (c *prometheusConverter) addSample(sample *prompb.Sample, lbls []prompb.Label) *prompb.TimeSeries {
	if sample == nil || len(lbls) == 0 {
		// This shouldn't happen
		return nil
	}

	ts, _ := c.getOrCreateTimeSeries(lbls)
	ts.Samples = append(ts.Samples, *sample)
	return ts
}