local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
local dashboard = grafana.dashboard;
local row = grafana.row;
local singlestat = grafana.singlestat;
local prometheus = grafana.prometheus;
local graphPanel = grafana.graphPanel;
local tablePanel = grafana.tablePanel;
local template = grafana.template;
{
grafanaDashboards+:: {
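// Prometheus overview dashboard: build/version and uptime stats, service
// discovery, scrape health, TSDB head size, and query engine performance.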
'prometheus.json':
local showMultiCluster = $._config.showMultiCluster;
local dashboard = g.dashboard(
'%(prefix)sOverview' % $._config.grafanaPrometheus
);
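// With showMultiCluster enabled, the dashboard gains a 'cluster' template
// variable and every query below also filters on it; otherwise only 'job'
// and 'instance' variables are created. A minimal sketch of turning it on
// from a consuming mixin (import path is illustrative):
//
//   (import 'mixin.libsonnet') + {
//     _config+:: {
//       showMultiCluster: true,
//       clusterLabel: 'cluster',
//     },
//   }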
local templatedDashboard = if showMultiCluster then
dashboard
.addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
.addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
.addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
else
dashboard
.addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
.addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
templatedDashboard
.addRow(
g.row('Prometheus Stats')
.addPanel(
g.panel('Prometheus Stats') +
g.tablePanel(if showMultiCluster then [
'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
] else [
'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
], {
cluster: { alias: if showMultiCluster then 'Cluster' else '' },
job: { alias: 'Job' },
instance: { alias: 'Instance' },
version: { alias: 'Version' },
'Value #A': { alias: 'Count', type: 'hidden' },
'Value #B': { alias: 'Uptime', type: 'number', unit: 's' },
})
)
)
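// Discovery row: time spent syncing scrape targets and the number of
// targets found by service discovery.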
.addRow(
g.row('Discovery')
.addPanel(
g.panel('Target Sync') +
g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
else '{{scrape_job}}') +
{ yaxes: g.yaxes('ms') }
)
.addPanel(
g.panel('Targets') +
g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
else 'Targets') +
g.stack
)
)
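// Retrieval row: actual vs. configured scrape interval, per-cause scrape
// failure rates, and the rate of samples appended to the TSDB head.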
.addRow(
g.row('Retrieval')
.addPanel(
g.panel('Average Scrape Interval Duration') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
else '{{interval}} configured') +
{ yaxes: g.yaxes('ms') }
)
.addPanel(
g.panel('Scrape Failures') +
g.queryPanel(if showMultiCluster then [
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
] else [
'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
], if showMultiCluster then [
'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
'out of bounds: {{cluster}} {{job}} {{instance}}',
'out of order: {{cluster}} {{job}} {{instance}}',
] else [
'exceeded body size limit: {{job}}',
'exceeded sample limit: {{job}}',
'duplicate timestamp: {{job}}',
'out of bounds: {{job}}',
'out of order: {{job}}',
]) +
g.stack
)
.addPanel(
g.panel('Appended Samples') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack
)
)
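// Storage row: TSDB head size in series and chunks per instance.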
.addRow(
g.row('Storage')
.addPanel(
g.panel('Head Series') +
g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
else '{{job}} {{instance}} head series') +
g.stack
)
.addPanel(
g.panel('Head Chunks') +
g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
else '{{job}} {{instance}} head chunks') +
g.stack
)
)
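// Query row: rate of inner_eval queries and 0.9-quantile duration of each
// query engine stage.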
.addRow(
g.row('Query')
.addPanel(
g.panel('Query Rate') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack,
)
.addPanel(
g.panel('Stage Duration') +
g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
'{{slice}}') +
{ yaxes: g.yaxes('ms') } +
g.stack,
)
) + {
tags: $._config.grafanaPrometheus.tags,
refresh: $._config.grafanaPrometheus.refresh,
},
// Remote-write-specific dashboard.
'prometheus-remote-write.json':
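// The 'Timestamps' panels compare the newest sample ingested into the WAL
// with the newest sample each remote write queue has sent; a growing gap
// (or a persistently positive rate difference) usually means remote write
// is falling behind.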
local timestampComparison =
graphPanel.new(
'Highest Timestamp In vs. Highest Timestamp Sent',
datasource='$datasource',
span=6,
)
.addTarget(prometheus.target(
|||
(
prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}
-
ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"} != 0)
)
|||,
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
));
local timestampComparisonRate =
graphPanel.new(
'Rate[5m]',
datasource='$datasource',
span=6,
)
.addTarget(prometheus.target(
|||
clamp_min(
rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~"$cluster", instance=~"$instance"}[5m])
-
ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])
, 0)
|||,
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}',
));
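// Incoming sample rate minus the rate of samples sent or dropped per queue;
// a sustained positive value indicates samples are backing up. The 'or'
// between metric names covers both the old and the renamed remote write
// series.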
local samplesRate =
graphPanel.new(
'Rate, in vs. succeeded or dropped [5m]',
datasource='$datasource',
span=12,
)
.addTarget(prometheus.target(
|||
rate(
prometheus_remote_storage_samples_in_total{cluster=~"$cluster", instance=~"$instance"}[5m])
-
ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
-
(rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]))
|||,
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
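// Remote write scales throughput by resharding; these panels show the
// actual, maximum, minimum, and desired shard counts per queue.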
local currentShards =
graphPanel.new(
'Current Shards',
datasource='$datasource',
span=12,
min_span=6,
)
.addTarget(prometheus.target(
'prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local maxShards =
graphPanel.new(
'Max Shards',
datasource='$datasource',
span=4,
)
.addTarget(prometheus.target(
'prometheus_remote_storage_shards_max{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local minShards =
graphPanel.new(
'Min Shards',
datasource='$datasource',
span=4,
)
.addTarget(prometheus.target(
'prometheus_remote_storage_shards_min{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local desiredShards =
graphPanel.new(
'Desired Shards',
datasource='$datasource',
span=4,
)
.addTarget(prometheus.target(
'prometheus_remote_storage_shards_desired{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
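// Per-shard queue capacity and the number of samples waiting to be sent.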
local shardsCapacity =
graphPanel.new(
'Shard Capacity',
datasource='$datasource',
span=6,
)
.addTarget(prometheus.target(
'prometheus_remote_storage_shard_capacity{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local pendingSamples =
graphPanel.new(
'Pending Samples',
datasource='$datasource',
span=6,
)
.addTarget(prometheus.target(
'prometheus_remote_storage_pending_samples{cluster=~"$cluster", instance=~"$instance", url=~"$url"} or prometheus_remote_storage_samples_pending{cluster=~"$cluster", instance=~"$instance", url=~"$url"}',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
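// The WAL watcher should stay close to the segment TSDB is currently
// writing; a widening gap between these two panels suggests remote write is
// reading the WAL too slowly.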
local walSegment =
graphPanel.new(
'TSDB Current Segment',
datasource='$datasource',
span=6,
formatY1='none',
)
.addTarget(prometheus.target(
'prometheus_tsdb_wal_segment_current{cluster=~"$cluster", instance=~"$instance"}',
legendFormat='{{cluster}}:{{instance}}'
));
local queueSegment =
graphPanel.new(
'Remote Write Current Segment',
datasource='$datasource',
span=6,
formatY1='none',
)
.addTarget(prometheus.target(
'prometheus_wal_watcher_current_segment{cluster=~"$cluster", instance=~"$instance"}',
legendFormat='{{cluster}}:{{instance}} {{consumer}}'
));
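// Failure-mode rates: dropped (filtered out, e.g. by write relabelling),
// failed (non-recoverable send errors), retried (recoverable errors), and
// retries when enqueueing samples. Most queries OR the old and new metric
// names where a rename happened.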
local droppedSamples =
graphPanel.new(
'Dropped Samples',
datasource='$datasource',
span=3,
)
.addTarget(prometheus.target(
'rate(prometheus_remote_storage_dropped_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local failedSamples =
graphPanel.new(
'Failed Samples',
datasource='$datasource',
span=3,
)
.addTarget(prometheus.target(
'rate(prometheus_remote_storage_failed_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local retriedSamples =
graphPanel.new(
'Retried Samples',
datasource='$datasource',
span=3,
)
.addTarget(prometheus.target(
'rate(prometheus_remote_storage_retried_samples_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
local enqueueRetries =
graphPanel.new(
'Enqueue Retries',
datasource='$datasource',
span=3,
)
.addTarget(prometheus.target(
'rate(prometheus_remote_storage_enqueue_retries_total{cluster=~"$cluster", instance=~"$instance", url=~"$url"}[5m])',
legendFormat='{{cluster}}:{{instance}} {{remote_name}}:{{url}}'
));
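// Assemble the remote write dashboard: datasource, cluster, instance and
// url template variables, then one row per group of panels defined above.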
dashboard.new(
title='%(prefix)sRemote Write' % $._config.grafanaPrometheus,
editable=true
)
.addTemplate(
{
hide: 0,
label: null,
name: 'datasource',
options: [],
query: 'prometheus',
refresh: 1,
regex: '',
type: 'datasource',
},
)
.addTemplate(
template.new(
'cluster',
'$datasource',
'label_values(prometheus_build_info, cluster)' % $._config,
refresh='time',
current={
selected: true,
text: 'All',
value: '$__all',
},
includeAll=true,
)
)
.addTemplate(
template.new(
'instance',
'$datasource',
'label_values(prometheus_build_info{cluster=~"$cluster"}, instance)' % $._config,
refresh='time',
current={
selected: true,
text: 'All',
value: '$__all',
},
includeAll=true,
)
)
.addTemplate(
template.new(
'url',
'$datasource',
'label_values(prometheus_remote_storage_shards{cluster=~"$cluster", instance=~"$instance"}, url)' % $._config,
refresh='time',
includeAll=true,
)
)
.addRow(
row.new('Timestamps')
.addPanel(timestampComparison)
.addPanel(timestampComparisonRate)
)
.addRow(
row.new('Samples')
.addPanel(samplesRate)
)
.addRow(
row.new(
'Shards'
)
.addPanel(currentShards)
.addPanel(maxShards)
.addPanel(minShards)
.addPanel(desiredShards)
)
.addRow(
row.new('Shard Details')
.addPanel(shardsCapacity)
.addPanel(pendingSamples)
)
.addRow(
row.new('Segments')
.addPanel(walSegment)
.addPanel(queueSegment)
)
.addRow(
row.new('Misc. Rates')
.addPanel(droppedSamples)
.addPanel(failedSamples)
.addPanel(retriedSamples)
.addPanel(enqueueRetries)
) + {
tags: $._config.grafanaPrometheus.tags,
refresh: $._config.grafanaPrometheus.refresh,
},
},
}