Merge pull request #14488 from piosz/api-bug

Temporary workaround for the problem with not-ready endpoints.
pull/6/head
Jerzy Szczepkowski 2015-09-24 16:04:59 +02:00
commit 0662141f67
2 changed files with 4 additions and 2 deletions

View File

@ -57,7 +57,7 @@ var _ = Describe("Autoscaling", func() {
// Consume 60% CPU // Consume 60% CPU
millicoresPerReplica := 600 millicoresPerReplica := 600
rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f) rc := NewStaticResourceConsumer("cpu-utilization", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute)) expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
rc.CleanUp() rc.CleanUp()
@ -79,7 +79,7 @@ var _ = Describe("Autoscaling", func() {
// Consume 60% of total memory capacity // Consume 60% of total memory capacity
megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode) megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f) rc := NewStaticResourceConsumer("mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute)) expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
rc.CleanUp() rc.CleanUp()

View File

@ -262,4 +262,6 @@ func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, repli
MemLimit: memLimitMb * 1024 * 1024, // MemLimit is in bytes MemLimit: memLimitMb * 1024 * 1024, // MemLimit is in bytes
} }
expectNoError(RunRC(config)) expectNoError(RunRC(config))
// Wait until endpoints are processed. Temporary workaround until #14477 is fixed.
time.Sleep(10 * time.Second)
} }