diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go
index 284beafde2..3b6b95c434 100644
--- a/test/e2e/framework/metrics_util.go
+++ b/test/e2e/framework/metrics_util.go
@@ -43,7 +43,6 @@ const (
 	// NodeStartupThreshold is a rough estimate of the time allocated for a pod to start on a node.
 	NodeStartupThreshold = 4 * time.Second
 
-	podStartupThreshold time.Duration = 5 * time.Second
 	// We are setting 1s threshold for apicalls even in small clusters to avoid flakes.
 	// The problem is that if long GC is happening in small clusters (where we have e.g.
 	// 1-core master machines) and tests are pretty short, it may consume significant
@@ -402,17 +401,17 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIRespons
 	return badMetrics, metrics, nil
 }
 
-// Verifies whether 50, 90 and 99th percentiles of e2e PodStartupLatency are
-// within the threshold.
-func VerifyPodStartupLatency(latency *PodStartupLatency) error {
-	if latency.E2ELatency.Perc50 > podStartupThreshold {
-		return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.E2ELatency.Perc50)
+// Verifies whether 50, 90 and 99th percentiles of a latency metric are
+// within the expected threshold.
+func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
+	if actual.Perc50 > threshold.Perc50 {
+		return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
 	}
-	if latency.E2ELatency.Perc90 > podStartupThreshold {
-		return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.E2ELatency.Perc90)
+	if actual.Perc90 > threshold.Perc90 {
+		return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
 	}
-	if latency.E2ELatency.Perc99 > podStartupThreshold {
-		return fmt.Errorf("too high pod startup latency 99th percentile: %v", latency.E2ELatency.Perc99)
+	if actual.Perc99 > threshold.Perc99 {
+		return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
 	}
 	return nil
 }
diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go
index 553bc7a316..4f0c197934 100644
--- a/test/e2e/scalability/density.go
+++ b/test/e2e/scalability/density.go
@@ -50,6 +50,7 @@ import (
 )
 
 const (
+	PodStartupLatencyThreshold = 5 * time.Second
 	MinSaturationThreshold     = 2 * time.Minute
 	MinPodsPerSecondThroughput = 8
 	DensityPollInterval        = 10 * time.Second
@@ -846,7 +847,12 @@ var _ = SIGDescribe("Density", func() {
 			f.TestSummaries = append(f.TestSummaries, podStartupLatency)
 
 			// Test whether e2e pod startup time is acceptable.
-			framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
+			podStartupLatencyThreshold := framework.LatencyMetric{
+				Perc50: PodStartupLatencyThreshold,
+				Perc90: PodStartupLatencyThreshold,
+				Perc99: PodStartupLatencyThreshold,
+			}
+			framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLatencyThreshold, podStartupLatency.E2ELatency, "pod startup"))
 
 			framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
 			latencyMeasurementPhase.End()
diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go
index 845fff8cfe..9ee85cbe1d 100644
--- a/test/e2e_node/density_test.go
+++ b/test/e2e_node/density_test.go
@@ -472,21 +472,6 @@ func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
 	return latencyMetrics, nil
 }
 
-// verifyPodStartupLatency verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
-// within the threshold.
-func verifyPodStartupLatency(expect, actual framework.LatencyMetric) error {
-	if actual.Perc50 > expect.Perc50 {
-		return fmt.Errorf("too high pod startup latency 50th percentile: %v", actual.Perc50)
-	}
-	if actual.Perc90 > expect.Perc90 {
-		return fmt.Errorf("too high pod startup latency 90th percentile: %v", actual.Perc90)
-	}
-	if actual.Perc99 > expect.Perc99 {
-		return fmt.Errorf("too high pod startup latency 99th percentile: %v", actual.Perc99)
-	}
-	return nil
-}
-
 // newInformerWatchPod creates an informer to check whether all pods are running.
 func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
 	ns := f.Namespace.Name
@@ -563,7 +548,7 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []framework.PodLatencyD
 
 	if isVerify {
 		// check whether e2e pod startup time is acceptable.
-		framework.ExpectNoError(verifyPodStartupLatency(podStartupLimits, podStartupLatency))
+		framework.ExpectNoError(framework.VerifyLatencyWithinThreshold(podStartupLimits, podStartupLatency, "pod startup"))
 
 		// check bactch pod creation latency
 		if podBatchStartupLimit > 0 {
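For reference, a minimal standalone sketch of the check this change generalizes. The LatencyMetric struct below mirrors only the three percentile fields of framework.LatencyMetric, and the sample values in main are illustrative assumptions, not part of the change; the helper body follows the logic added to metrics_util.go.

package main

import (
	"fmt"
	"time"
)

// LatencyMetric mirrors the percentile fields of framework.LatencyMetric
// that VerifyLatencyWithinThreshold inspects.
type LatencyMetric struct {
	Perc50, Perc90, Perc99 time.Duration
}

// verifyLatencyWithinThreshold checks each observed percentile against its
// own threshold, matching the generalized helper introduced in the diff.
func verifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
	if actual.Perc50 > threshold.Perc50 {
		return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
	}
	if actual.Perc90 > threshold.Perc90 {
		return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
	}
	if actual.Perc99 > threshold.Perc99 {
		return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
	}
	return nil
}

func main() {
	// A flat 5s limit on every percentile, as the density test now builds
	// from PodStartupLatencyThreshold.
	limit := LatencyMetric{Perc50: 5 * time.Second, Perc90: 5 * time.Second, Perc99: 5 * time.Second}

	// Hypothetical observations: the 99th percentile exceeds the limit.
	observed := LatencyMetric{Perc50: 2 * time.Second, Perc90: 4 * time.Second, Perc99: 6 * time.Second}

	if err := verifyLatencyWithinThreshold(limit, observed, "pod startup"); err != nil {
		fmt.Println(err) // too high pod startup latency 99th percentile: 6s
	}
}

Using the same value for all three percentiles reproduces the old flat podStartupThreshold behavior, while callers such as logAndVerifyLatency in test/e2e_node can pass genuinely per-percentile limits instead.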