rename latency to duration in metrics

pull/564/head
danielqsj 2019-02-18 17:40:04 +08:00
parent 0bfe4c26b1
commit 79a3eb816c
11 changed files with 57 additions and 57 deletions

View File

@@ -284,7 +284,7 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error { func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
start := time.Now() start := time.Now()
defer func() { defer func() {
metrics.CgroupManagerLatency.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start)) metrics.CgroupManagerDuration.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start))
metrics.DeprecatedCgroupManagerLatency.WithLabelValues("destroy").Observe(metrics.SinceInMicroseconds(start)) metrics.DeprecatedCgroupManagerLatency.WithLabelValues("destroy").Observe(metrics.SinceInMicroseconds(start))
}() }()
@@ -412,7 +412,7 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error { func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
start := time.Now() start := time.Now()
defer func() { defer func() {
metrics.CgroupManagerLatency.WithLabelValues("update").Observe(metrics.SinceInSeconds(start)) metrics.CgroupManagerDuration.WithLabelValues("update").Observe(metrics.SinceInSeconds(start))
metrics.DeprecatedCgroupManagerLatency.WithLabelValues("update").Observe(metrics.SinceInMicroseconds(start)) metrics.DeprecatedCgroupManagerLatency.WithLabelValues("update").Observe(metrics.SinceInMicroseconds(start))
}() }()
@@ -448,7 +448,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error { func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
start := time.Now() start := time.Now()
defer func() { defer func() {
metrics.CgroupManagerLatency.WithLabelValues("create").Observe(metrics.SinceInSeconds(start)) metrics.CgroupManagerDuration.WithLabelValues("create").Observe(metrics.SinceInSeconds(start))
metrics.DeprecatedCgroupManagerLatency.WithLabelValues("create").Observe(metrics.SinceInMicroseconds(start)) metrics.DeprecatedCgroupManagerLatency.WithLabelValues("create").Observe(metrics.SinceInMicroseconds(start))
}() }()

View File

@@ -697,7 +697,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
// in a passed in AllocateRequest pointer, and issues a single Allocate call per pod. // in a passed in AllocateRequest pointer, and issues a single Allocate call per pod.
klog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource) klog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource)
resp, err := eI.e.allocate(devs) resp, err := eI.e.allocate(devs)
metrics.DevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime)) metrics.DevicePluginAllocationDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime))
metrics.DeprecatedDevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime)) metrics.DeprecatedDevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime))
if err != nil { if err != nil {
// In case of allocation failure, we want to restore m.allocatedDevices // In case of allocation failure, we want to restore m.allocatedDevices

View File

@@ -1500,7 +1500,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
if !firstSeenTime.IsZero() { if !firstSeenTime.IsZero() {
// This is the first time we are syncing the pod. Record the latency // This is the first time we are syncing the pod. Record the latency
// since kubelet first saw the pod if firstSeenTime is set. // since kubelet first saw the pod if firstSeenTime is set.
metrics.PodWorkerStartLatency.Observe(metrics.SinceInSeconds(firstSeenTime)) metrics.PodWorkerStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
metrics.DeprecatedPodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) metrics.DeprecatedPodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
} else { } else {
klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID) klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
@@ -1518,7 +1518,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID) existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning && if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning &&
!firstSeenTime.IsZero() { !firstSeenTime.IsZero() {
metrics.PodStartLatency.Observe(metrics.SinceInSeconds(firstSeenTime)) metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
metrics.DeprecatedPodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) metrics.DeprecatedPodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
} }
@@ -1998,7 +1998,7 @@ func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mir
UpdateType: syncType, UpdateType: syncType,
OnCompleteFunc: func(err error) { OnCompleteFunc: func(err error) {
if err != nil { if err != nil {
metrics.PodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInSeconds(start)) metrics.PodWorkerDuration.WithLabelValues(syncType.String()).Observe(metrics.SinceInSeconds(start))
metrics.DeprecatedPodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInMicroseconds(start)) metrics.DeprecatedPodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInMicroseconds(start))
} }
}, },

View File

@@ -50,7 +50,7 @@ func newInstrumentedImageManagerService(service internalapi.ImageManagerService)
func recordOperation(operation string, start time.Time) { func recordOperation(operation string, start time.Time) {
metrics.RuntimeOperations.WithLabelValues(operation).Inc() metrics.RuntimeOperations.WithLabelValues(operation).Inc()
metrics.DeprecatedRuntimeOperations.WithLabelValues(operation).Inc() metrics.DeprecatedRuntimeOperations.WithLabelValues(operation).Inc()
metrics.RuntimeOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInSeconds(start)) metrics.RuntimeOperationsDuration.WithLabelValues(operation).Observe(metrics.SinceInSeconds(start))
metrics.DeprecatedRuntimeOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start)) metrics.DeprecatedRuntimeOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
} }

View File

@@ -30,7 +30,7 @@ import (
func TestRecordOperation(t *testing.T) { func TestRecordOperation(t *testing.T) {
prometheus.MustRegister(metrics.RuntimeOperations) prometheus.MustRegister(metrics.RuntimeOperations)
prometheus.MustRegister(metrics.RuntimeOperationsLatency) prometheus.MustRegister(metrics.RuntimeOperationsDuration)
prometheus.MustRegister(metrics.RuntimeOperationsErrors) prometheus.MustRegister(metrics.RuntimeOperationsErrors)
temporalServer := "127.0.0.1:1234" temporalServer := "127.0.0.1:1234"
@@ -51,7 +51,7 @@ func TestRecordOperation(t *testing.T) {
recordOperation("create_container", time.Now()) recordOperation("create_container", time.Now())
runtimeOperationsCounterExpected := "kubelet_runtime_operations_total{operation_type=\"create_container\"} 1" runtimeOperationsCounterExpected := "kubelet_runtime_operations_total{operation_type=\"create_container\"} 1"
runtimeOperationsLatencyExpected := "kubelet_runtime_operations_latency_seconds_count{operation_type=\"create_container\"} 1" runtimeOperationsDurationExpected := "kubelet_runtime_operations_duration_seconds_count{operation_type=\"create_container\"} 1"
assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r) mux.ServeHTTP(w, r)
@@ -59,7 +59,7 @@ func TestRecordOperation(t *testing.T) {
assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r) mux.ServeHTTP(w, r)
}), "GET", prometheusURL, nil, runtimeOperationsLatencyExpected) }), "GET", prometheusURL, nil, runtimeOperationsDurationExpected)
} }
func TestInstrumentedVersion(t *testing.T) { func TestInstrumentedVersion(t *testing.T) {

View File

@@ -34,11 +34,11 @@ const (
KubeletSubsystem = "kubelet" KubeletSubsystem = "kubelet"
NodeNameKey = "node_name" NodeNameKey = "node_name"
NodeLabelKey = "node" NodeLabelKey = "node"
PodWorkerLatencyKey = "pod_worker_latency_seconds" PodWorkerDurationKey = "pod_worker_duration_seconds"
PodStartLatencyKey = "pod_start_latency_seconds" PodStartDurationKey = "pod_start_duration_seconds"
CgroupManagerOperationsKey = "cgroup_manager_latency_seconds" CgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
PodWorkerStartLatencyKey = "pod_worker_start_latency_seconds" PodWorkerStartDurationKey = "pod_worker_start_duration_seconds"
PLEGRelistLatencyKey = "pleg_relist_latency_seconds" PLEGRelistDurationKey = "pleg_relist_duration_seconds"
PLEGDiscardEventsKey = "pleg_discard_events" PLEGDiscardEventsKey = "pleg_discard_events"
PLEGRelistIntervalKey = "pleg_relist_interval_seconds" PLEGRelistIntervalKey = "pleg_relist_interval_seconds"
EvictionStatsAgeKey = "eviction_stats_age_seconds" EvictionStatsAgeKey = "eviction_stats_age_seconds"
@@ -57,14 +57,14 @@ const (
VolumeStatsInodesUsedKey = "volume_stats_inodes_used" VolumeStatsInodesUsedKey = "volume_stats_inodes_used"
// Metrics keys of remote runtime operations // Metrics keys of remote runtime operations
RuntimeOperationsKey = "runtime_operations_total" RuntimeOperationsKey = "runtime_operations_total"
RuntimeOperationsLatencyKey = "runtime_operations_latency_seconds" RuntimeOperationsDurationKey = "runtime_operations_duration_seconds"
RuntimeOperationsErrorsKey = "runtime_operations_errors_total" RuntimeOperationsErrorsKey = "runtime_operations_errors_total"
DeprecatedRuntimeOperationsKey = "runtime_operations" DeprecatedRuntimeOperationsKey = "runtime_operations"
DeprecatedRuntimeOperationsLatencyKey = "runtime_operations_latency_microseconds" DeprecatedRuntimeOperationsLatencyKey = "runtime_operations_latency_microseconds"
DeprecatedRuntimeOperationsErrorsKey = "runtime_operations_errors" DeprecatedRuntimeOperationsErrorsKey = "runtime_operations_errors"
// Metrics keys of device plugin operations // Metrics keys of device plugin operations
DevicePluginRegistrationCountKey = "device_plugin_registration_total" DevicePluginRegistrationCountKey = "device_plugin_registration_total"
DevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_seconds" DevicePluginAllocationDurationKey = "device_plugin_alloc_duration_seconds"
DeprecatedDevicePluginRegistrationCountKey = "device_plugin_registration_count" DeprecatedDevicePluginRegistrationCountKey = "device_plugin_registration_count"
DeprecatedDevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_microseconds" DeprecatedDevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_microseconds"
@@ -101,45 +101,45 @@ var (
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
) )
PodWorkerLatency = prometheus.NewHistogramVec( PodWorkerDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: PodWorkerLatencyKey, Name: PodWorkerDurationKey,
Help: "Latency in seconds to sync a single pod. Broken down by operation type: create, update, or sync", Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
[]string{"operation_type"}, []string{"operation_type"},
) )
PodStartLatency = prometheus.NewHistogram( PodStartDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: PodStartLatencyKey, Name: PodStartDurationKey,
Help: "Latency in seconds for a single pod to go from pending to running.", Help: "Duration in seconds for a single pod to go from pending to running.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
) )
CgroupManagerLatency = prometheus.NewHistogramVec( CgroupManagerDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: CgroupManagerOperationsKey, Name: CgroupManagerOperationsKey,
Help: "Latency in seconds for cgroup manager operations. Broken down by method.", Help: "Duration in seconds for cgroup manager operations. Broken down by method.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
[]string{"operation_type"}, []string{"operation_type"},
) )
PodWorkerStartLatency = prometheus.NewHistogram( PodWorkerStartDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: PodWorkerStartLatencyKey, Name: PodWorkerStartDurationKey,
Help: "Latency in seconds from seeing a pod to starting a worker.", Help: "Duration in seconds from seeing a pod to starting a worker.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
) )
PLEGRelistLatency = prometheus.NewHistogram( PLEGRelistDuration = prometheus.NewHistogram(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: PLEGRelistLatencyKey, Name: PLEGRelistDurationKey,
Help: "Latency in seconds for relisting pods in PLEG.", Help: "Duration in seconds for relisting pods in PLEG.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
) )
@@ -168,11 +168,11 @@ var (
}, },
[]string{"operation_type"}, []string{"operation_type"},
) )
RuntimeOperationsLatency = prometheus.NewHistogramVec( RuntimeOperationsDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: RuntimeOperationsLatencyKey, Name: RuntimeOperationsDurationKey,
Help: "Latency in seconds of runtime operations. Broken down by operation type.", Help: "Duration in seconds of runtime operations. Broken down by operation type.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
[]string{"operation_type"}, []string{"operation_type"},
@@ -202,11 +202,11 @@ var (
}, },
[]string{"resource_name"}, []string{"resource_name"},
) )
DevicePluginAllocationLatency = prometheus.NewHistogramVec( DevicePluginAllocationDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{ prometheus.HistogramOpts{
Subsystem: KubeletSubsystem, Subsystem: KubeletSubsystem,
Name: DevicePluginAllocationLatencyKey, Name: DevicePluginAllocationDurationKey,
Help: "Latency in seconds to serve a device plugin Allocation request. Broken down by resource name.", Help: "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.",
Buckets: prometheus.DefBuckets, Buckets: prometheus.DefBuckets,
}, },
[]string{"resource_name"}, []string{"resource_name"},
@@ -363,21 +363,21 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheu
// Register the metrics. // Register the metrics.
registerMetrics.Do(func() { registerMetrics.Do(func() {
prometheus.MustRegister(NodeName) prometheus.MustRegister(NodeName)
prometheus.MustRegister(PodWorkerLatency) prometheus.MustRegister(PodWorkerDuration)
prometheus.MustRegister(PodStartLatency) prometheus.MustRegister(PodStartDuration)
prometheus.MustRegister(CgroupManagerLatency) prometheus.MustRegister(CgroupManagerDuration)
prometheus.MustRegister(PodWorkerStartLatency) prometheus.MustRegister(PodWorkerStartDuration)
prometheus.MustRegister(ContainersPerPodCount) prometheus.MustRegister(ContainersPerPodCount)
prometheus.MustRegister(newPodAndContainerCollector(containerCache)) prometheus.MustRegister(newPodAndContainerCollector(containerCache))
prometheus.MustRegister(PLEGRelistLatency) prometheus.MustRegister(PLEGRelistDuration)
prometheus.MustRegister(PLEGDiscardEvents) prometheus.MustRegister(PLEGDiscardEvents)
prometheus.MustRegister(PLEGRelistInterval) prometheus.MustRegister(PLEGRelistInterval)
prometheus.MustRegister(RuntimeOperations) prometheus.MustRegister(RuntimeOperations)
prometheus.MustRegister(RuntimeOperationsLatency) prometheus.MustRegister(RuntimeOperationsDuration)
prometheus.MustRegister(RuntimeOperationsErrors) prometheus.MustRegister(RuntimeOperationsErrors)
prometheus.MustRegister(EvictionStatsAge) prometheus.MustRegister(EvictionStatsAge)
prometheus.MustRegister(DevicePluginRegistrationCount) prometheus.MustRegister(DevicePluginRegistrationCount)
prometheus.MustRegister(DevicePluginAllocationLatency) prometheus.MustRegister(DevicePluginAllocationDuration)
prometheus.MustRegister(DeprecatedPodWorkerLatency) prometheus.MustRegister(DeprecatedPodWorkerLatency)
prometheus.MustRegister(DeprecatedPodStartLatency) prometheus.MustRegister(DeprecatedPodStartLatency)
prometheus.MustRegister(DeprecatedCgroupManagerLatency) prometheus.MustRegister(DeprecatedCgroupManagerLatency)

View File

@@ -195,7 +195,7 @@ func (g *GenericPLEG) relist() {
timestamp := g.clock.Now() timestamp := g.clock.Now()
defer func() { defer func() {
metrics.PLEGRelistLatency.Observe(metrics.SinceInSeconds(timestamp)) metrics.PLEGRelistDuration.Observe(metrics.SinceInSeconds(timestamp))
metrics.DeprecatedPLEGRelistLatency.Observe(metrics.SinceInMicroseconds(timestamp)) metrics.DeprecatedPLEGRelistLatency.Observe(metrics.SinceInMicroseconds(timestamp))
}() }()

View File

@@ -102,13 +102,13 @@ func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletM
// Note that the KubeletMetrics passed in should not contain subsystem prefix. // Note that the KubeletMetrics passed in should not contain subsystem prefix.
func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics { func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
latencyMetricNames := sets.NewString( latencyMetricNames := sets.NewString(
kubeletmetrics.PodWorkerLatencyKey, kubeletmetrics.PodWorkerDurationKey,
kubeletmetrics.PodWorkerStartLatencyKey, kubeletmetrics.PodWorkerStartDurationKey,
kubeletmetrics.PodStartLatencyKey, kubeletmetrics.PodStartDurationKey,
kubeletmetrics.CgroupManagerOperationsKey, kubeletmetrics.CgroupManagerOperationsKey,
dockermetrics.DockerOperationsLatencyKey, dockermetrics.DockerOperationsLatencyKey,
kubeletmetrics.PodWorkerStartLatencyKey, kubeletmetrics.PodWorkerStartDurationKey,
kubeletmetrics.PLEGRelistLatencyKey, kubeletmetrics.PLEGRelistDurationKey,
) )
return GetKubeletLatencyMetrics(ms, latencyMetricNames) return GetKubeletLatencyMetrics(ms, latencyMetricNames)
} }

View File

@@ -168,9 +168,9 @@ var InterestingKubeletMetrics = []string{
"kubelet_docker_errors", "kubelet_docker_errors",
"kubelet_docker_operations_latency_seconds", "kubelet_docker_operations_latency_seconds",
"kubelet_generate_pod_status_latency_microseconds", "kubelet_generate_pod_status_latency_microseconds",
"kubelet_pod_start_latency_seconds", "kubelet_pod_start_duration_seconds",
"kubelet_pod_worker_latency_seconds", "kubelet_pod_worker_duration_seconds",
"kubelet_pod_worker_start_latency_seconds", "kubelet_pod_worker_start_duration_seconds",
"kubelet_sync_pods_latency_microseconds", "kubelet_sync_pods_latency_microseconds",
} }

View File

@@ -459,12 +459,12 @@ func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
for _, samples := range ms { for _, samples := range ms {
for _, sample := range samples { for _, sample := range samples {
if sample.Metric["__name__"] == kubemetrics.KubeletSubsystem+"_"+kubemetrics.PodStartLatencyKey { if sample.Metric["__name__"] == kubemetrics.KubeletSubsystem+"_"+kubemetrics.PodStartDurationKey {
quantile, _ := strconv.ParseFloat(string(sample.Metric["quantile"]), 64) quantile, _ := strconv.ParseFloat(string(sample.Metric["quantile"]), 64)
latencyMetrics = append(latencyMetrics, latencyMetrics = append(latencyMetrics,
framework.KubeletLatencyMetric{ framework.KubeletLatencyMetric{
Quantile: quantile, Quantile: quantile,
Method: kubemetrics.PodStartLatencyKey, Method: kubemetrics.PodStartDurationKey,
Latency: time.Duration(int(sample.Value)) * time.Microsecond}) Latency: time.Duration(int(sample.Value)) * time.Microsecond})
} }
} }

View File

@@ -156,7 +156,7 @@ func logDevicePluginMetrics() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
for msKey, samples := range ms { for msKey, samples := range ms {
switch msKey { switch msKey {
case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginAllocationLatencyKey: case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginAllocationDurationKey:
for _, sample := range samples { for _, sample := range samples {
latency := sample.Value latency := sample.Value
resource := string(sample.Metric["resource_name"]) resource := string(sample.Metric["resource_name"])