Tweak limits in scalability tests

pull/6/head
Wojciech Tyczynski 2015-10-19 14:45:24 +02:00
parent 58f168315e
commit d1179f1d67
4 changed files with 22 additions and 15 deletions


@@ -141,7 +141,7 @@ var _ = Describe("Density", func() {
 	expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
 	// Verify latency metrics
-	highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second)
+	highLatencyRequests, err := HighLatencyRequests(c)
 	expectNoError(err)
 	Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 })
@@ -383,9 +383,7 @@ var _ = Describe("Density", func() {
 	// Test whether e2e pod startup time is acceptable.
 	podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLag)}
-	// TODO: Switch it to 5 seconds once we are sure our tests are passing.
-	podStartupThreshold := 8 * time.Second
-	expectNoError(VerifyPodStartupLatency(podStartupLatency, podStartupThreshold))
+	expectNoError(VerifyPodStartupLatency(podStartupLatency))
 	// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
 	for _, l := range startupLag {


@@ -94,7 +94,7 @@ var _ = Describe("[Performance Suite] Latency", func() {
 	expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
 	// Verify latency metrics
-	highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second)
+	highLatencyRequests, err := HighLatencyRequests(c)
 	expectNoError(err)
 	Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 })
@@ -266,9 +266,7 @@ func runLatencyTest(nodeCount int, c *client.Client, ns string) {
 	// Test whether e2e pod startup time is acceptable.
 	podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLatencies)}
-	// TODO: Switch it to 5 seconds once we are sure our tests are passing.
-	podStartupThreshold := 8 * time.Second
-	expectNoError(VerifyPodStartupLatency(podStartupLatency, podStartupThreshold))
+	expectNoError(VerifyPodStartupLatency(podStartupLatency))
 	// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
 	logSuspiciousLatency(startLatencies, nil, nodeCount, c)


@@ -81,7 +81,7 @@ var _ = Describe("Load capacity", func() {
 	deleteAllRC(configs)
 	// Verify latency metrics
-	highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second)
+	highLatencyRequests, err := HighLatencyRequests(c)
 	expectNoError(err, "Too many instances metrics above the threshold")
 	Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
 })


@@ -36,6 +36,12 @@ import (
 	"github.com/prometheus/common/model"
 )
 
+const (
+	podStartupThreshold     time.Duration = 5 * time.Second
+	listPodLatencyThreshold time.Duration = 2 * time.Second
+	apiCallLatencyThreshold time.Duration = 250 * time.Millisecond
+)
+
 // Dashboard metrics
 type LatencyMetric struct {
 	Perc50 time.Duration `json:"Perc50"`
@@ -129,9 +135,9 @@ func readLatencyMetrics(c *client.Client) (APIResponsiveness, error) {
 	return a, err
 }
 
-// Prints summary metrics for request types with latency above threshold
-// and returns number of such request types.
-func HighLatencyRequests(c *client.Client, threshold time.Duration) (int, error) {
+// Prints top five summary metrics for request types with latency and returns
+// number of such request types above threshold.
+func HighLatencyRequests(c *client.Client) (int, error) {
 	metrics, err := readLatencyMetrics(c)
 	if err != nil {
 		return 0, err
@@ -140,6 +146,11 @@ func HighLatencyRequests(c *client.Client, threshold time.Duration) (int, error)
 	badMetrics := 0
 	top := 5
 	for _, metric := range metrics.APICalls {
+		threshold := apiCallLatencyThreshold
+		if metric.Verb == "LIST" && metric.Resource == "pods" {
+			threshold = listPodLatencyThreshold
+		}
+
 		isBad := false
 		if metric.Latency.Perc99 > threshold {
 			badMetrics++
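
Below is a minimal, standalone Go sketch of the per-request-type threshold selection this hunk introduces: LIST pods is allowed a looser 99th-percentile latency than other API calls. The apiCall struct, pickThreshold helper, and the small main harness are illustrative stand-ins, not part of the commit; only the constant names and values are taken from the const block added above.

package main

import (
	"fmt"
	"time"
)

// Threshold values mirroring the constants added in this commit.
const (
	listPodLatencyThreshold = 2 * time.Second
	apiCallLatencyThreshold = 250 * time.Millisecond
)

// apiCall is a simplified stand-in for one entry of metrics.APICalls.
type apiCall struct {
	Verb     string
	Resource string
	Perc99   time.Duration
}

// pickThreshold mirrors the selection inside HighLatencyRequests:
// listing pods gets the looser threshold, everything else the default.
func pickThreshold(call apiCall) time.Duration {
	if call.Verb == "LIST" && call.Resource == "pods" {
		return listPodLatencyThreshold
	}
	return apiCallLatencyThreshold
}

func main() {
	calls := []apiCall{
		{Verb: "GET", Resource: "pods", Perc99: 180 * time.Millisecond},
		{Verb: "LIST", Resource: "pods", Perc99: 1200 * time.Millisecond},
		{Verb: "PUT", Resource: "nodes", Perc99: 400 * time.Millisecond},
	}
	badMetrics := 0
	for _, call := range calls {
		// A request type counts as bad when its 99th percentile exceeds its threshold.
		if call.Perc99 > pickThreshold(call) {
			badMetrics++
			fmt.Printf("high latency: %s %s p99=%v\n", call.Verb, call.Resource, call.Perc99)
		}
	}
	fmt.Printf("%d request types above threshold\n", badMetrics)
}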
@@ -160,9 +171,9 @@ func HighLatencyRequests(c *client.Client, threshold time.Duration) (int, error)
 	return badMetrics, nil
 }
 
-// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are smaller
-// than the given threshold (returns error in the oposite case).
-func VerifyPodStartupLatency(latency PodStartupLatency, podStartupThreshold time.Duration) error {
+// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
+// within the threshold.
+func VerifyPodStartupLatency(latency PodStartupLatency) error {
 	Logf("Pod startup latency: %s", prettyPrintJSON(latency))
 	if latency.Latency.Perc50 > podStartupThreshold {
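
The last hunk is cut off at the Perc50 check. As a rough, self-contained sketch (not the commit's code), the check plausibly continues by comparing each percentile against the package-level podStartupThreshold and returning an error when one exceeds it. The struct shapes below are simplified from the LatencyMetric and PodStartupLatency types in this file, and verifyPodStartupLatency plus the sample values in main are assumptions for illustration only.

package main

import (
	"fmt"
	"time"
)

// Constant added by this commit (previously passed in by each caller).
const podStartupThreshold = 5 * time.Second

// Simplified versions of the metrics types used in the diff.
type LatencyMetric struct {
	Perc50 time.Duration
	Perc90 time.Duration
	Perc99 time.Duration
}

type PodStartupLatency struct {
	Latency LatencyMetric
}

// verifyPodStartupLatency sketches the check: all three percentiles must be
// within podStartupThreshold, otherwise an error is returned.
func verifyPodStartupLatency(l PodStartupLatency) error {
	if l.Latency.Perc50 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 50th percentile: %v", l.Latency.Perc50)
	}
	if l.Latency.Perc90 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 90th percentile: %v", l.Latency.Perc90)
	}
	if l.Latency.Perc99 > podStartupThreshold {
		return fmt.Errorf("too high pod startup latency 99th percentile: %v", l.Latency.Perc99)
	}
	return nil
}

func main() {
	sample := PodStartupLatency{Latency: LatencyMetric{
		Perc50: 1200 * time.Millisecond,
		Perc90: 3500 * time.Millisecond,
		Perc99: 6200 * time.Millisecond, // exceeds the 5s threshold
	}}
	if err := verifyPodStartupLatency(sample); err != nil {
		fmt.Println("latency check failed:", err)
	}
}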