From d1179f1d67d1b5b0e4337cf955fb9a975e438583 Mon Sep 17 00:00:00 2001
From: Wojciech Tyczynski
Date: Mon, 19 Oct 2015 14:45:24 +0200
Subject: [PATCH] Tweak limits in scalability tests

---
 test/e2e/density.go      |  6 ++----
 test/e2e/latency.go      |  6 ++----
 test/e2e/load.go         |  2 +-
 test/e2e/metrics_util.go | 23 +++++++++++++++++------
 4 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/test/e2e/density.go b/test/e2e/density.go
index 9f8e0266c1..abda23845b 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -141,7 +141,7 @@ var _ = Describe("Density", func() {
 		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))

 		// Verify latency metrics
-		highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second)
+		highLatencyRequests, err := HighLatencyRequests(c)
 		expectNoError(err)
 		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 	})
@@ -383,9 +383,7 @@ var _ = Describe("Density", func() {

 			// Test whether e2e pod startup time is acceptable.
 			podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLag)}
-			// TODO: Switch it to 5 seconds once we are sure our tests are passing.
-			podStartupThreshold := 8 * time.Second
-			expectNoError(VerifyPodStartupLatency(podStartupLatency, podStartupThreshold))
+			expectNoError(VerifyPodStartupLatency(podStartupLatency))

 			// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
 			for _, l := range startupLag {
diff --git a/test/e2e/latency.go b/test/e2e/latency.go
index e1ec34e026..0275dd4975 100644
--- a/test/e2e/latency.go
+++ b/test/e2e/latency.go
@@ -94,7 +94,7 @@ var _ = Describe("[Performance Suite] Latency", func() {
 		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))

 		// Verify latency metrics
-		highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second)
+		highLatencyRequests, err := HighLatencyRequests(c)
 		expectNoError(err)
 		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 	})
@@ -266,9 +266,7 @@ func runLatencyTest(nodeCount int, c *client.Client, ns string) {

 	// Test whether e2e pod startup time is acceptable.
 	podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLatencies)}
-	// TODO: Switch it to 5 seconds once we are sure our tests are passing.
-	podStartupThreshold := 8 * time.Second
-	expectNoError(VerifyPodStartupLatency(podStartupLatency, podStartupThreshold))
+	expectNoError(VerifyPodStartupLatency(podStartupLatency))

 	// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
 	logSuspiciousLatency(startLatencies, nil, nodeCount, c)
diff --git a/test/e2e/load.go b/test/e2e/load.go
index 2d85cc7cb6..f45db25a45 100644
--- a/test/e2e/load.go
+++ b/test/e2e/load.go
@@ -81,7 +81,7 @@ var _ = Describe("Load capacity", func() {
 		deleteAllRC(configs)

 		// Verify latency metrics
-		highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second)
+		highLatencyRequests, err := HighLatencyRequests(c)
 		expectNoError(err, "Too many instances metrics above the threshold")
 		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
 	})
diff --git a/test/e2e/metrics_util.go b/test/e2e/metrics_util.go
index 9b4d303eae..ae008433c3 100644
--- a/test/e2e/metrics_util.go
+++ b/test/e2e/metrics_util.go
@@ -36,6 +36,12 @@ import (
 	"github.com/prometheus/common/model"
 )

+const (
+	podStartupThreshold     time.Duration = 5 * time.Second
+	listPodLatencyThreshold time.Duration = 2 * time.Second
+	apiCallLatencyThreshold time.Duration = 250 * time.Millisecond
+)
+
 // Dashboard metrics
 type LatencyMetric struct {
 	Perc50 time.Duration `json:"Perc50"`
@@ -129,9 +135,9 @@ func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
 	return a, err
 }

-// Prints summary metrics for request types with latency above threshold
-// and returns number of such request types.
-func HighLatencyRequests(c *client.Client, threshold time.Duration) (int, error) {
+// Prints top five summary metrics for request types with latency and returns
+// number of such request types above threshold.
+func HighLatencyRequests(c *client.Client) (int, error) {
 	metrics, err := readLatencyMetrics(c)
 	if err != nil {
 		return 0, err
@@ -140,6 +146,11 @@ func HighLatencyRequests(c *client.Client, threshold time.Duration) (int, error)
 	badMetrics := 0
 	top := 5
 	for _, metric := range metrics.APICalls {
+		threshold := apiCallLatencyThreshold
+		if metric.Verb == "LIST" && metric.Resource == "pods" {
+			threshold = listPodLatencyThreshold
+		}
+
 		isBad := false
 		if metric.Latency.Perc99 > threshold {
 			badMetrics++
@@ -160,9 +171,9 @@ func HighLatencyRequests(c *client.Client, threshold time.Duration) (int, error)
 	return badMetrics, nil
 }

-// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are smaller
-// than the given threshold (returns error in the oposite case).
-func VerifyPodStartupLatency(latency PodStartupLatency, podStartupThreshold time.Duration) error {
+// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
+// within the threshold.
+func VerifyPodStartupLatency(latency PodStartupLatency) error {
 	Logf("Pod startup latency: %s", prettyPrintJSON(latency))

 	if latency.Latency.Perc50 > podStartupThreshold {
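
The behavioral core of this patch is that HighLatencyRequests no longer takes a caller-supplied 3s threshold; it now picks a per-call limit itself (LIST pods gets a 2s budget, every other API call 250ms at the 99th percentile), while pod startup is checked against a fixed 5s constant. The following standalone Go sketch mirrors only that threshold-selection logic under simplified, hypothetical types; apiCallStub, thresholdFor, and the main harness are illustrative stand-ins, not part of the e2e framework, though the constant values match the patch.

// Standalone sketch of the per-call threshold selection introduced by this
// patch. The types and harness below are illustrative assumptions.
package main

import (
	"fmt"
	"time"
)

const (
	podStartupThreshold     = 5 * time.Second          // matches the patch
	listPodLatencyThreshold = 2 * time.Second          // matches the patch
	apiCallLatencyThreshold = 250 * time.Millisecond   // matches the patch
)

// apiCallStub is a simplified stand-in for the metric entries the patched
// HighLatencyRequests inspects: verb, resource, and 99th-percentile latency.
type apiCallStub struct {
	Verb     string
	Resource string
	Perc99   time.Duration
}

// thresholdFor picks the limit the same way the patched loop does:
// LIST pods is allowed up to 2s, everything else must stay under 250ms.
func thresholdFor(call apiCallStub) time.Duration {
	if call.Verb == "LIST" && call.Resource == "pods" {
		return listPodLatencyThreshold
	}
	return apiCallLatencyThreshold
}

func main() {
	// Hypothetical sample data to exercise the selection logic.
	calls := []apiCallStub{
		{Verb: "LIST", Resource: "pods", Perc99: 1800 * time.Millisecond}, // OK: under the 2s LIST-pods budget
		{Verb: "GET", Resource: "nodes", Perc99: 300 * time.Millisecond},  // bad: over the 250ms default
		{Verb: "POST", Resource: "pods", Perc99: 120 * time.Millisecond},  // OK
	}
	bad := 0
	for _, call := range calls {
		if call.Perc99 > thresholdFor(call) {
			bad++
			fmt.Printf("high-latency call: %s %s (p99=%v)\n", call.Verb, call.Resource, call.Perc99)
		}
	}
	fmt.Printf("%d high-latency request types\n", bad)
}

Running the sketch reports one high-latency request type (GET nodes), which is the kind of per-verb budgeting the real test now applies instead of one flat 3s limit for everything.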