@@ -38,6 +38,7 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/scrape"
@@ -868,29 +869,30 @@ func (c *NopWriteClient) Store(context.Context, []byte, int) error { return nil
 func (c *NopWriteClient) Name() string { return "nopwriteclient" }
 func (c *NopWriteClient) Endpoint() string { return "http://test-remote.com/1234" }
 
+// Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics.
+var extraLabels []labels.Label = []labels.Label{
+	{Name: "kubernetes_io_arch", Value: "amd64"},
+	{Name: "kubernetes_io_instance_type", Value: "c3.somesize"},
+	{Name: "kubernetes_io_os", Value: "linux"},
+	{Name: "container_name", Value: "some-name"},
+	{Name: "failure_domain_kubernetes_io_region", Value: "somewhere-1"},
+	{Name: "failure_domain_kubernetes_io_zone", Value: "somewhere-1b"},
+	{Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"},
+	{Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"},
+	{Name: "instance", Value: "ip-111-11-1-11.ec2.internal"},
+	{Name: "job", Value: "kubernetes-cadvisor"},
+	{Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"},
+	{Name: "monitor", Value: "prod"},
+	{Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"},
+	{Name: "namespace", Value: "kube-system"},
+	{Name: "pod_name", Value: "some-other-name-5j8s8"},
+}
+
 func BenchmarkSampleSend(b *testing.B) {
 	// Send one sample per series, which is the typical remote_write case
 	const numSamples = 1
 	const numSeries = 10000
 
-	// Extra labels to make a more realistic workload - taken from Kubernetes' embedded cAdvisor metrics.
-	extraLabels := []labels.Label{
-		{Name: "kubernetes_io_arch", Value: "amd64"},
-		{Name: "kubernetes_io_instance_type", Value: "c3.somesize"},
-		{Name: "kubernetes_io_os", Value: "linux"},
-		{Name: "container_name", Value: "some-name"},
-		{Name: "failure_domain_kubernetes_io_region", Value: "somewhere-1"},
-		{Name: "failure_domain_kubernetes_io_zone", Value: "somewhere-1b"},
-		{Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"},
-		{Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"},
-		{Name: "instance", Value: "ip-111-11-1-11.ec2.internal"},
-		{Name: "job", Value: "kubernetes-cadvisor"},
-		{Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"},
-		{Name: "monitor", Value: "prod"},
-		{Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"},
-		{Name: "namespace", Value: "kube-system"},
-		{Name: "pod_name", Value: "some-other-name-5j8s8"},
-	}
 	samples, series := createTimeseries(numSamples, numSeries, extraLabels...)
 
 	c := NewNopWriteClient()
@@ -921,6 +923,58 @@ func BenchmarkSampleSend(b *testing.B) {
 	b.StopTimer()
 }
 
+// Check how long it takes to add N series, including external labels processing.
+func BenchmarkStoreSeries(b *testing.B) {
+	externalLabels := []labels.Label{
+		{Name: "cluster", Value: "mycluster"},
+		{Name: "replica", Value: "1"},
+	}
+	relabelConfigs := []*relabel.Config{{
+		SourceLabels: model.LabelNames{"namespace"},
+		Separator:    ";",
+		Regex:        relabel.MustNewRegexp("kube.*"),
+		TargetLabel:  "job",
+		Replacement:  "$1",
+		Action:       relabel.Replace,
+	}}
+	testCases := []struct {
+		name           string
+		externalLabels []labels.Label
+		ts             []prompb.TimeSeries
+		relabelConfigs []*relabel.Config
+	}{
+		{name: "plain"},
+		{name: "externalLabels", externalLabels: externalLabels},
+		{name: "relabel", relabelConfigs: relabelConfigs},
+		{
+			name:           "externalLabels+relabel",
+			externalLabels: externalLabels,
+			relabelConfigs: relabelConfigs,
+		},
+	}
+
+	// numSeries chosen to be big enough that StoreSeries dominates creating a new queue manager.
+	const numSeries = 1000
+	_, series := createTimeseries(0, numSeries, extraLabels...)
+
+	for _, tc := range testCases {
+		b.Run(tc.name, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				c := NewTestWriteClient()
+				dir := b.TempDir()
+				cfg := config.DefaultQueueConfig
+				mcfg := config.DefaultMetadataConfig
+				metrics := newQueueManagerMetrics(nil, "", "")
+				m := NewQueueManager(metrics, nil, nil, nil, dir, newEWMARate(ewmaWeight, shardUpdateDuration), cfg, mcfg, labels.EmptyLabels(), nil, c, defaultFlushDeadline, newPool(), newHighestTimestampMetric(), nil, false, false)
+				m.externalLabels = tc.externalLabels
+				m.relabelConfigs = tc.relabelConfigs
+
+				m.StoreSeries(series, 0)
+			}
+		})
+	}
+}
+
 func BenchmarkStartup(b *testing.B) {
 	dir := os.Getenv("WALDIR")
 	if dir == "" {