Merge pull request #63896 from mtaufen/refactor-test-metrics

Automatic merge from submit-queue (batch tested with PRs 64013, 63896, 64139, 57527, 62102). If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

Refactor test utils that deal with Kubelet metrics for clarity

I found these functions hard to understand because the names did not
accurately reflect their behavior. For example, GetKubeletMetrics
assumed that all of the metrics passed in were measuring latency.
The caller of GetKubeletMetrics was implicitly making this assumption,
but it was not obvious at the call site.

```release-note
NONE
```
pull/8/head
Kubernetes Submit Queue 2018-05-23 19:44:15 -07:00 committed by GitHub
commit 10377f6593
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 22 additions and 17 deletions

View File

@ -97,10 +97,11 @@ func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletM
return kubeletMetrics, nil return kubeletMetrics, nil
} }
// GetKubeletLatencyMetrics gets all latency related kubelet metrics. Note that the KubeletMetrcis // GetDefaultKubeletLatencyMetrics calls GetKubeletLatencyMetrics with a set of default metricNames
// passed in should not contain subsystem prefix. // identifying common latency metrics.
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics { // Note that the KubeletMetrics passed in should not contain subsystem prefix.
latencyMethods := sets.NewString( func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
latencyMetricNames := sets.NewString(
kubeletmetrics.PodWorkerLatencyKey, kubeletmetrics.PodWorkerLatencyKey,
kubeletmetrics.PodWorkerStartLatencyKey, kubeletmetrics.PodWorkerStartLatencyKey,
kubeletmetrics.PodStartLatencyKey, kubeletmetrics.PodStartLatencyKey,
@ -109,13 +110,15 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
kubeletmetrics.PodWorkerStartLatencyKey, kubeletmetrics.PodWorkerStartLatencyKey,
kubeletmetrics.PLEGRelistLatencyKey, kubeletmetrics.PLEGRelistLatencyKey,
) )
return GetKubeletMetrics(ms, latencyMethods) return GetKubeletLatencyMetrics(ms, latencyMetricNames)
} }
func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLatencyMetrics { // GetKubeletLatencyMetrics filters ms to include only those contained in the metricNames set,
// then constructs a KubeletLatencyMetrics list based on the samples associated with those metrics.
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics, filterMetricNames sets.String) KubeletLatencyMetrics {
var latencyMetrics KubeletLatencyMetrics var latencyMetrics KubeletLatencyMetrics
for method, samples := range ms { for name, samples := range ms {
if !methods.Has(method) { if !filterMetricNames.Has(name) {
continue continue
} }
for _, sample := range samples { for _, sample := range samples {
@ -131,7 +134,7 @@ func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLa
latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{ latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{
Operation: operation, Operation: operation,
Method: method, Method: name,
Quantile: quantile, Quantile: quantile,
Latency: time.Duration(int64(latency)) * time.Microsecond, Latency: time.Duration(int64(latency)) * time.Microsecond,
}) })
@ -265,7 +268,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
if err != nil { if err != nil {
return KubeletLatencyMetrics{}, err return KubeletLatencyMetrics{}, err
} }
latencyMetrics := GetKubeletLatencyMetrics(ms) latencyMetrics := GetDefaultKubeletLatencyMetrics(ms)
sort.Sort(latencyMetrics) sort.Sort(latencyMetrics)
var badMetrics KubeletLatencyMetrics var badMetrics KubeletLatencyMetrics
logFunc("\nLatency metrics for node %v", nodeName) logFunc("\nLatency metrics for node %v", nodeName)

View File

@ -412,7 +412,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
framework.Logf("Node does NOT have %s", expectedNodeCondition) framework.Logf("Node does NOT have %s", expectedNodeCondition)
} }
} }
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
logFunc() logFunc()
return verifyEvictionOrdering(f, testSpecs) return verifyEvictionOrdering(f, testSpecs)
}, pressureTimeout, evictionPollInterval).Should(BeNil()) }, pressureTimeout, evictionPollInterval).Should(BeNil())
@ -426,7 +426,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition)) By(fmt.Sprintf("Waiting for NodeCondition: %s to no longer exist on the node", expectedNodeCondition))
Eventually(func() error { Eventually(func() error {
logFunc() logFunc()
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) { if expectedNodeCondition != noPressure && hasNodeCondition(f, expectedNodeCondition) {
return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition) return fmt.Errorf("Conditions havent returned to normal, node still has %s", expectedNodeCondition)
} }
@ -439,7 +439,7 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition) return fmt.Errorf("%s dissappeared and then reappeared", expectedNodeCondition)
} }
logFunc() logFunc()
logKubeletMetrics(kubeletmetrics.EvictionStatsAgeKey) logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
return verifyEvictionOrdering(f, testSpecs) return verifyEvictionOrdering(f, testSpecs)
}, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil()) }, postTestConditionMonitoringPeriod, evictionPollInterval).Should(BeNil())
}) })

View File

@ -319,17 +319,19 @@ func getLocalNode(f *framework.Framework) *apiv1.Node {
return &nodeList.Items[0] return &nodeList.Items[0]
} }
// logs prometheus metrics from the local kubelet. // logKubeletLatencyMetrics logs KubeletLatencyMetrics computed from the Prometheus
func logKubeletMetrics(metricKeys ...string) { // metrics exposed on the current node and identified by the metricNames.
// The Kubelet subsystem prefix is automatically prepended to these metric names.
func logKubeletLatencyMetrics(metricNames ...string) {
metricSet := sets.NewString() metricSet := sets.NewString()
for _, key := range metricKeys { for _, key := range metricNames {
metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key) metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key)
} }
metric, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255") metric, err := metrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName + ":10255")
if err != nil { if err != nil {
framework.Logf("Error getting kubelet metrics: %v", err) framework.Logf("Error getting kubelet metrics: %v", err)
} else { } else {
framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletMetrics(metric, metricSet)) framework.Logf("Kubelet Metrics: %+v", framework.GetKubeletLatencyMetrics(metric, metricSet))
} }
} }