Merge pull request #15697 from piosz/autoscaling-e2e-fix

Auto commit by PR queue bot
k8s-merge-robot 2015-10-27 18:53:40 -07:00
commit 70049d10d8
2 changed files with 30 additions and 16 deletions

View File

@@ -63,22 +63,22 @@ type ResourceConsumer struct {
 	requestSizeInMegabytes int
 }
 
-func NewDynamicResourceConsumer(name string, replicas, initCPU, initMemory int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPU, initMemory, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
+func NewDynamicResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
 }
 
-func NewStaticResourceConsumer(name string, replicas, initCPU, initMemory int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPU, initMemory, staticConsumptionTimeInSeconds, initCPU/replicas, initMemory/replicas, cpuLimit, memLimit, framework)
+func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, staticConsumptionTimeInSeconds, initCPUTotal/replicas, initMemoryTotal/replicas, cpuLimit, memLimit, framework)
 }
 
 /*
 NewResourceConsumer creates new ResourceConsumer
-initCPU argument is in millicores
-initMemory argument is in megabytes
+initCPUTotal argument is in millicores
+initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name string, replicas, initCPU, initMemory, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+func newResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
 	runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name: name,
@@ -93,9 +93,9 @@ func newResourceConsumer(name string, replicas, initCPU, initMemory, consumption
 		requestSizeInMegabytes: requestSizeInMegabytes,
 	}
 	go rc.makeConsumeCPURequests()
-	rc.ConsumeCPU(initCPU)
+	rc.ConsumeCPU(initCPUTotal)
 	go rc.makeConsumeMemRequests()
-	rc.ConsumeMem(initMemory)
+	rc.ConsumeMem(initMemoryTotal)
 	return rc
 }
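
The rename above is mechanical, but it spells out the units contract: initCPUTotal (millicores) and initMemoryTotal (megabytes) are totals spread across all replicas, while cpuLimit and memLimit cap a single pod. A minimal usage sketch against the new signature (the name "cpu-load" and the figures are illustrative, not taken from this diff; f is the suite's *Framework):

	// Spread 2500 millicores of load over 5 replicas (500 millicores per pod);
	// a single pod may consume at most 600 millicores and 100 MB.
	rc := NewStaticResourceConsumer("cpu-load", 5, 2500, 0, 600, 100, f)
	defer rc.CleanUp()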

View File

@@ -61,11 +61,13 @@ var _ = Describe("Autoscaling", func() {
 		setUpAutoscaler("cpu/node_utilization", 0.4, nodeCount, nodeCount+1)
 
 		// Consume 50% CPU
-		millicoresPerReplica := 500
-		rc := NewStaticResourceConsumer("cpu-utilization", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+		rcs := createConsumingRCs(f, "cpu-utilization", nodeCount*coresPerNode, 500, 0)
+		err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+		for _, rc := range rcs {
+			rc.CleanUp()
+		}
+		expectNoError(err)
 
-		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 	})
@@ -84,10 +86,13 @@ var _ = Describe("Autoscaling", func() {
 		// Consume 60% of total memory capacity
 		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
-		rc := NewStaticResourceConsumer("mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+		rcs := createConsumingRCs(f, "mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica)
+		err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+		for _, rc := range rcs {
+			rc.CleanUp()
+		}
+		expectNoError(err)
 
-		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 	})
@@ -116,6 +121,15 @@ func setUpAutoscaler(metric string, target float64, min, max int) {
 	expectNoError(err, "Output: "+string(out))
 }
 
+func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer {
+	var res []*ResourceConsumer
+	for i := 1; i <= count; i++ {
+		name := fmt.Sprintf("%s-%d", name, i)
+		res = append(res, NewStaticResourceConsumer(name, 1, cpuPerReplica, memPerReplica, int64(cpuPerReplica), int64(memPerReplica+100), f))
+	}
+	return res
+}
+
 func cleanUpAutoscaler() {
 	By("Removing autoscaler")
 	out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "stop-autoscaling",
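
The new createConsumingRCs helper swaps one count-replica consumer for count single-replica consumers, so the load arrives as many small RCs rather than one large one, and each consumer gets its own handle for cleanup. A sketch of the calling pattern the tests above follow (the name "cpu-load" and sizes are illustrative; nodeCount, coresPerNode, scaleUpTimeout, and f come from the surrounding suite):

	// One single-replica consumer per core, 500 millicores each.
	rcs := createConsumingRCs(f, "cpu-load", nodeCount*coresPerNode, 500, 0)
	err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
	// Tear the consumers down before asserting, so a failed scale-up
	// still releases the load ahead of the scale-down check.
	for _, rc := range rcs {
		rc.CleanUp()
	}
	expectNoError(err)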