rename latency to duration in metrics

pull/564/head
danielqsj 2019-02-18 17:40:04 +08:00
parent 0bfe4c26b1
commit 79a3eb816c
11 changed files with 57 additions and 57 deletions

View File

@@ -284,7 +284,7 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool {
 func (m *cgroupManagerImpl) Destroy(cgroupConfig *CgroupConfig) error {
 	start := time.Now()
 	defer func() {
-		metrics.CgroupManagerLatency.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start))
+		metrics.CgroupManagerDuration.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start))
 		metrics.DeprecatedCgroupManagerLatency.WithLabelValues("destroy").Observe(metrics.SinceInMicroseconds(start))
 	}()
@@ -412,7 +412,7 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont
 func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 	start := time.Now()
 	defer func() {
-		metrics.CgroupManagerLatency.WithLabelValues("update").Observe(metrics.SinceInSeconds(start))
+		metrics.CgroupManagerDuration.WithLabelValues("update").Observe(metrics.SinceInSeconds(start))
 		metrics.DeprecatedCgroupManagerLatency.WithLabelValues("update").Observe(metrics.SinceInMicroseconds(start))
 	}()
@@ -448,7 +448,7 @@ func (m *cgroupManagerImpl) Update(cgroupConfig *CgroupConfig) error {
 func (m *cgroupManagerImpl) Create(cgroupConfig *CgroupConfig) error {
 	start := time.Now()
 	defer func() {
-		metrics.CgroupManagerLatency.WithLabelValues("create").Observe(metrics.SinceInSeconds(start))
+		metrics.CgroupManagerDuration.WithLabelValues("create").Observe(metrics.SinceInSeconds(start))
 		metrics.DeprecatedCgroupManagerLatency.WithLabelValues("create").Observe(metrics.SinceInMicroseconds(start))
 	}()
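
Reviewer note: every call site in this commit follows the same dual-emit pattern — the renamed *Duration histogram observes seconds via metrics.SinceInSeconds, while the Deprecated*Latency collector keeps observing microseconds until the old names are removed. A minimal, self-contained sketch of that pattern (all names below are illustrative stand-ins, not the kubelet's actual collectors):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// Illustrative stand-ins for the kubelet's metrics.SinceInSeconds and
// metrics.SinceInMicroseconds helpers.
func sinceInSeconds(start time.Time) float64 {
	return time.Since(start).Seconds()
}

func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start) / time.Microsecond)
}

var (
	// Renamed metric: a histogram of seconds with a *_duration_seconds name.
	opDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: "kubelet",
			Name:      "example_operation_duration_seconds", // hypothetical name
			Help:      "Duration in seconds of an example operation.",
			Buckets:   prometheus.DefBuckets,
		},
		[]string{"operation_type"},
	)
	// Old metric kept through the deprecation window, recorded in microseconds.
	deprecatedOpLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: "kubelet",
			Name:      "example_operation_latency_microseconds", // hypothetical name
			Help:      "(Deprecated) Latency in microseconds of an example operation.",
		},
		[]string{"operation_type"},
	)
)

func destroy() {
	start := time.Now()
	defer func() {
		// Both series are observed, so dashboards built on the old name keep
		// working while consumers migrate to the *_duration_seconds name.
		opDuration.WithLabelValues("destroy").Observe(sinceInSeconds(start))
		deprecatedOpLatency.WithLabelValues("destroy").Observe(sinceInMicroseconds(start))
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for the real cgroup work
}

func main() {
	prometheus.MustRegister(opDuration, deprecatedOpLatency)
	destroy()
}
```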

View File

@@ -697,7 +697,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
 		// in a passed in AllocateRequest pointer, and issues a single Allocate call per pod.
 		klog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource)
 		resp, err := eI.e.allocate(devs)
-		metrics.DevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime))
+		metrics.DevicePluginAllocationDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime))
 		metrics.DeprecatedDevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime))
 		if err != nil {
 			// In case of allocation failure, we want to restore m.allocatedDevices

View File

@@ -1500,7 +1500,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		if !firstSeenTime.IsZero() {
 			// This is the first time we are syncing the pod. Record the latency
 			// since kubelet first saw the pod if firstSeenTime is set.
-			metrics.PodWorkerStartLatency.Observe(metrics.SinceInSeconds(firstSeenTime))
+			metrics.PodWorkerStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
 			metrics.DeprecatedPodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
 		} else {
 			klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
@@ -1518,7 +1518,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 	existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
 	if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning &&
 		!firstSeenTime.IsZero() {
-		metrics.PodStartLatency.Observe(metrics.SinceInSeconds(firstSeenTime))
+		metrics.PodStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
 		metrics.DeprecatedPodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
 	}
@@ -1998,7 +1998,7 @@ func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mir
 		UpdateType: syncType,
 		OnCompleteFunc: func(err error) {
 			if err != nil {
-				metrics.PodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInSeconds(start))
+				metrics.PodWorkerDuration.WithLabelValues(syncType.String()).Observe(metrics.SinceInSeconds(start))
 				metrics.DeprecatedPodWorkerLatency.WithLabelValues(syncType.String()).Observe(metrics.SinceInMicroseconds(start))
 			}
 		},

View File

@@ -50,7 +50,7 @@ func newInstrumentedImageManagerService(service internalapi.ImageManagerService)
 func recordOperation(operation string, start time.Time) {
 	metrics.RuntimeOperations.WithLabelValues(operation).Inc()
 	metrics.DeprecatedRuntimeOperations.WithLabelValues(operation).Inc()
-	metrics.RuntimeOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInSeconds(start))
+	metrics.RuntimeOperationsDuration.WithLabelValues(operation).Observe(metrics.SinceInSeconds(start))
 	metrics.DeprecatedRuntimeOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
 }
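
A sketch of how a wrapped runtime method would use this helper (assuming the instrumented-service pattern this file is built around; the stand-in below avoids the real CRI types). The key detail is that defer evaluates its arguments immediately, so time.Now() captures the start of the call while recordOperation itself runs at return:

```go
package main

import (
	"fmt"
	"time"
)

// Stand-in for the file's recordOperation helper.
func recordOperation(operation string, start time.Time) {
	fmt.Printf("operation=%s duration=%v\n", operation, time.Since(start))
}

// version mimics an instrumented runtime method: the deferred call's
// time.Now() argument is evaluated up front, so the recorded duration
// spans the whole method body.
func version() string {
	const operation = "version"
	defer recordOperation(operation, time.Now())
	time.Sleep(5 * time.Millisecond) // stand-in for the real runtime RPC
	return "0.1.0"
}

func main() {
	version()
}
```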

View File

@@ -30,7 +30,7 @@ import (
 func TestRecordOperation(t *testing.T) {
 	prometheus.MustRegister(metrics.RuntimeOperations)
-	prometheus.MustRegister(metrics.RuntimeOperationsLatency)
+	prometheus.MustRegister(metrics.RuntimeOperationsDuration)
 	prometheus.MustRegister(metrics.RuntimeOperationsErrors)
 	temporalServer := "127.0.0.1:1234"
@@ -51,7 +51,7 @@ func TestRecordOperation(t *testing.T) {
 	recordOperation("create_container", time.Now())
 	runtimeOperationsCounterExpected := "kubelet_runtime_operations_total{operation_type=\"create_container\"} 1"
-	runtimeOperationsLatencyExpected := "kubelet_runtime_operations_latency_seconds_count{operation_type=\"create_container\"} 1"
+	runtimeOperationsDurationExpected := "kubelet_runtime_operations_duration_seconds_count{operation_type=\"create_container\"} 1"
 	assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		mux.ServeHTTP(w, r)
@@ -59,7 +59,7 @@ func TestRecordOperation(t *testing.T) {
 	assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		mux.ServeHTTP(w, r)
-	}), "GET", prometheusURL, nil, runtimeOperationsLatencyExpected)
+	}), "GET", prometheusURL, nil, runtimeOperationsDurationExpected)
 }
 
 func TestInstrumentedVersion(t *testing.T) {
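
For context on the expected strings above: in the Prometheus text exposition format a histogram is rendered as *_bucket, *_sum, and *_count series, so a single observation of the renamed metric yields the kubelet_runtime_operations_duration_seconds_count line the test matches. A minimal sketch (registry, address, and observed value are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	h := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Subsystem: "kubelet",
		Name:      "runtime_operations_duration_seconds",
		Help:      "Duration in seconds of runtime operations.",
		Buckets:   prometheus.DefBuckets,
	}, []string{"operation_type"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(h)
	h.WithLabelValues("create_container").Observe(0.42)

	// GET /metrics now includes, alongside the *_bucket and *_sum series:
	//   kubelet_runtime_operations_duration_seconds_count{operation_type="create_container"} 1
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe("127.0.0.1:9099", nil))
}
```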

View File

@@ -34,11 +34,11 @@ const (
 	KubeletSubsystem = "kubelet"
 	NodeNameKey = "node_name"
 	NodeLabelKey = "node"
-	PodWorkerLatencyKey = "pod_worker_latency_seconds"
-	PodStartLatencyKey = "pod_start_latency_seconds"
-	CgroupManagerOperationsKey = "cgroup_manager_latency_seconds"
-	PodWorkerStartLatencyKey = "pod_worker_start_latency_seconds"
-	PLEGRelistLatencyKey = "pleg_relist_latency_seconds"
+	PodWorkerDurationKey = "pod_worker_duration_seconds"
+	PodStartDurationKey = "pod_start_duration_seconds"
+	CgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
+	PodWorkerStartDurationKey = "pod_worker_start_duration_seconds"
+	PLEGRelistDurationKey = "pleg_relist_duration_seconds"
 	PLEGDiscardEventsKey = "pleg_discard_events"
 	PLEGRelistIntervalKey = "pleg_relist_interval_seconds"
 	EvictionStatsAgeKey = "eviction_stats_age_seconds"
@@ -57,14 +57,14 @@ const (
 	VolumeStatsInodesUsedKey = "volume_stats_inodes_used"
 	// Metrics keys of remote runtime operations
 	RuntimeOperationsKey = "runtime_operations_total"
-	RuntimeOperationsLatencyKey = "runtime_operations_latency_seconds"
+	RuntimeOperationsDurationKey = "runtime_operations_duration_seconds"
 	RuntimeOperationsErrorsKey = "runtime_operations_errors_total"
 	DeprecatedRuntimeOperationsKey = "runtime_operations"
 	DeprecatedRuntimeOperationsLatencyKey = "runtime_operations_latency_microseconds"
 	DeprecatedRuntimeOperationsErrorsKey = "runtime_operations_errors"
 	// Metrics keys of device plugin operations
 	DevicePluginRegistrationCountKey = "device_plugin_registration_total"
-	DevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_seconds"
+	DevicePluginAllocationDurationKey = "device_plugin_alloc_duration_seconds"
 	DeprecatedDevicePluginRegistrationCountKey = "device_plugin_registration_count"
 	DeprecatedDevicePluginAllocationLatencyKey = "device_plugin_alloc_latency_microseconds"
@@ -101,45 +101,45 @@ var (
 			Buckets: prometheus.DefBuckets,
 		},
 	)
-	PodWorkerLatency = prometheus.NewHistogramVec(
+	PodWorkerDuration = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
-			Name: PodWorkerLatencyKey,
-			Help: "Latency in seconds to sync a single pod. Broken down by operation type: create, update, or sync",
+			Name: PodWorkerDurationKey,
+			Help: "Duration in seconds to sync a single pod. Broken down by operation type: create, update, or sync",
 			Buckets: prometheus.DefBuckets,
 		},
 		[]string{"operation_type"},
 	)
-	PodStartLatency = prometheus.NewHistogram(
+	PodStartDuration = prometheus.NewHistogram(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
-			Name: PodStartLatencyKey,
-			Help: "Latency in seconds for a single pod to go from pending to running.",
+			Name: PodStartDurationKey,
+			Help: "Duration in seconds for a single pod to go from pending to running.",
 			Buckets: prometheus.DefBuckets,
 		},
 	)
-	CgroupManagerLatency = prometheus.NewHistogramVec(
+	CgroupManagerDuration = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
 			Name: CgroupManagerOperationsKey,
-			Help: "Latency in seconds for cgroup manager operations. Broken down by method.",
+			Help: "Duration in seconds for cgroup manager operations. Broken down by method.",
 			Buckets: prometheus.DefBuckets,
 		},
 		[]string{"operation_type"},
 	)
-	PodWorkerStartLatency = prometheus.NewHistogram(
+	PodWorkerStartDuration = prometheus.NewHistogram(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
-			Name: PodWorkerStartLatencyKey,
-			Help: "Latency in seconds from seeing a pod to starting a worker.",
+			Name: PodWorkerStartDurationKey,
+			Help: "Duration in seconds from seeing a pod to starting a worker.",
 			Buckets: prometheus.DefBuckets,
 		},
 	)
-	PLEGRelistLatency = prometheus.NewHistogram(
+	PLEGRelistDuration = prometheus.NewHistogram(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
-			Name: PLEGRelistLatencyKey,
-			Help: "Latency in seconds for relisting pods in PLEG.",
+			Name: PLEGRelistDurationKey,
+			Help: "Duration in seconds for relisting pods in PLEG.",
 			Buckets: prometheus.DefBuckets,
 		},
 	)
@@ -168,11 +168,11 @@ var (
 		},
 		[]string{"operation_type"},
 	)
-	RuntimeOperationsLatency = prometheus.NewHistogramVec(
+	RuntimeOperationsDuration = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
-			Name: RuntimeOperationsLatencyKey,
-			Help: "Latency in seconds of runtime operations. Broken down by operation type.",
+			Name: RuntimeOperationsDurationKey,
+			Help: "Duration in seconds of runtime operations. Broken down by operation type.",
 			Buckets: prometheus.DefBuckets,
 		},
 		[]string{"operation_type"},
@@ -202,11 +202,11 @@ var (
 		},
 		[]string{"resource_name"},
 	)
-	DevicePluginAllocationLatency = prometheus.NewHistogramVec(
+	DevicePluginAllocationDuration = prometheus.NewHistogramVec(
 		prometheus.HistogramOpts{
 			Subsystem: KubeletSubsystem,
-			Name: DevicePluginAllocationLatencyKey,
-			Help: "Latency in seconds to serve a device plugin Allocation request. Broken down by resource name.",
+			Name: DevicePluginAllocationDurationKey,
+			Help: "Duration in seconds to serve a device plugin Allocation request. Broken down by resource name.",
 			Buckets: prometheus.DefBuckets,
 		},
 		[]string{"resource_name"},
@@ -363,21 +363,21 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheu
 	// Register the metrics.
 	registerMetrics.Do(func() {
 		prometheus.MustRegister(NodeName)
-		prometheus.MustRegister(PodWorkerLatency)
-		prometheus.MustRegister(PodStartLatency)
-		prometheus.MustRegister(CgroupManagerLatency)
-		prometheus.MustRegister(PodWorkerStartLatency)
+		prometheus.MustRegister(PodWorkerDuration)
+		prometheus.MustRegister(PodStartDuration)
+		prometheus.MustRegister(CgroupManagerDuration)
+		prometheus.MustRegister(PodWorkerStartDuration)
 		prometheus.MustRegister(ContainersPerPodCount)
 		prometheus.MustRegister(newPodAndContainerCollector(containerCache))
-		prometheus.MustRegister(PLEGRelistLatency)
+		prometheus.MustRegister(PLEGRelistDuration)
 		prometheus.MustRegister(PLEGDiscardEvents)
 		prometheus.MustRegister(PLEGRelistInterval)
 		prometheus.MustRegister(RuntimeOperations)
-		prometheus.MustRegister(RuntimeOperationsLatency)
+		prometheus.MustRegister(RuntimeOperationsDuration)
 		prometheus.MustRegister(RuntimeOperationsErrors)
 		prometheus.MustRegister(EvictionStatsAge)
 		prometheus.MustRegister(DevicePluginRegistrationCount)
-		prometheus.MustRegister(DevicePluginAllocationLatency)
+		prometheus.MustRegister(DevicePluginAllocationDuration)
 		prometheus.MustRegister(DeprecatedPodWorkerLatency)
 		prometheus.MustRegister(DeprecatedPodStartLatency)
 		prometheus.MustRegister(DeprecatedCgroupManagerLatency)
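
For anyone updating dashboards or alerts against these kubelet metrics, the renames are one-for-one. A hypothetical migration map (old exposed name to new, with the kubelet_ subsystem prefix applied) covering the keys changed in this file:

```go
// Hypothetical helper for migration tooling: old exposed metric names
// mapped to the names introduced by this change.
var renamedKubeletMetrics = map[string]string{
	"kubelet_pod_worker_latency_seconds":          "kubelet_pod_worker_duration_seconds",
	"kubelet_pod_start_latency_seconds":           "kubelet_pod_start_duration_seconds",
	"kubelet_cgroup_manager_latency_seconds":      "kubelet_cgroup_manager_duration_seconds",
	"kubelet_pod_worker_start_latency_seconds":    "kubelet_pod_worker_start_duration_seconds",
	"kubelet_pleg_relist_latency_seconds":         "kubelet_pleg_relist_duration_seconds",
	"kubelet_runtime_operations_latency_seconds":  "kubelet_runtime_operations_duration_seconds",
	"kubelet_device_plugin_alloc_latency_seconds": "kubelet_device_plugin_alloc_duration_seconds",
}
```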

View File

@@ -195,7 +195,7 @@ func (g *GenericPLEG) relist() {
 	timestamp := g.clock.Now()
 	defer func() {
-		metrics.PLEGRelistLatency.Observe(metrics.SinceInSeconds(timestamp))
+		metrics.PLEGRelistDuration.Observe(metrics.SinceInSeconds(timestamp))
 		metrics.DeprecatedPLEGRelistLatency.Observe(metrics.SinceInMicroseconds(timestamp))
 	}()

View File

@@ -102,13 +102,13 @@ func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletM
 // Note that the KubeletMetrics passed in should not contain subsystem prefix.
 func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
 	latencyMetricNames := sets.NewString(
-		kubeletmetrics.PodWorkerLatencyKey,
-		kubeletmetrics.PodWorkerStartLatencyKey,
-		kubeletmetrics.PodStartLatencyKey,
+		kubeletmetrics.PodWorkerDurationKey,
+		kubeletmetrics.PodWorkerStartDurationKey,
+		kubeletmetrics.PodStartDurationKey,
 		kubeletmetrics.CgroupManagerOperationsKey,
 		dockermetrics.DockerOperationsLatencyKey,
-		kubeletmetrics.PodWorkerStartLatencyKey,
-		kubeletmetrics.PLEGRelistLatencyKey,
+		kubeletmetrics.PodWorkerStartDurationKey,
+		kubeletmetrics.PLEGRelistDurationKey,
 	)
 	return GetKubeletLatencyMetrics(ms, latencyMetricNames)
 }

View File

@@ -168,9 +168,9 @@ var InterestingKubeletMetrics = []string{
 	"kubelet_docker_errors",
 	"kubelet_docker_operations_latency_seconds",
 	"kubelet_generate_pod_status_latency_microseconds",
-	"kubelet_pod_start_latency_seconds",
-	"kubelet_pod_worker_latency_seconds",
-	"kubelet_pod_worker_start_latency_seconds",
+	"kubelet_pod_start_duration_seconds",
+	"kubelet_pod_worker_duration_seconds",
+	"kubelet_pod_worker_start_duration_seconds",
 	"kubelet_sync_pods_latency_microseconds",
 }

View File

@@ -459,12 +459,12 @@ func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
 	for _, samples := range ms {
 		for _, sample := range samples {
-			if sample.Metric["__name__"] == kubemetrics.KubeletSubsystem+"_"+kubemetrics.PodStartLatencyKey {
+			if sample.Metric["__name__"] == kubemetrics.KubeletSubsystem+"_"+kubemetrics.PodStartDurationKey {
 				quantile, _ := strconv.ParseFloat(string(sample.Metric["quantile"]), 64)
 				latencyMetrics = append(latencyMetrics,
 					framework.KubeletLatencyMetric{
 						Quantile: quantile,
-						Method: kubemetrics.PodStartLatencyKey,
+						Method: kubemetrics.PodStartDurationKey,
 						Latency: time.Duration(int(sample.Value)) * time.Microsecond})
 			}
 		}

View File

@@ -156,7 +156,7 @@ func logDevicePluginMetrics() {
 	framework.ExpectNoError(err)
 	for msKey, samples := range ms {
 		switch msKey {
-		case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginAllocationLatencyKey:
+		case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginAllocationDurationKey:
 			for _, sample := range samples {
 				latency := sample.Value
 				resource := string(sample.Metric["resource_name"])