Merge pull request #18164 from jayunit100/hpa-parameterize

Auto commit by PR queue bot
pull/6/head
k8s-merge-robot 2015-12-10 04:40:53 -08:00
commit b118b801f3
1 changed file with 61 additions and 21 deletions

View File

@ -26,10 +26,9 @@ import (
)
// Shared parameters for the HPA e2e scale tests.
// NOTE: the diff residue here declared kindRC/kindDeployment/subresource twice
// (old and new hunk lines both present), which does not compile — collapsed to
// a single declaration.
const (
	kindRC         = "replicationController"
	kindDeployment = "deployment"
	subresource    = "scale"
	// stabilityTimeout is retained for any legacy call sites; the refactored
	// tests pass an explicit stasis duration via HPAScaleTest.firstScaleStasis.
	stabilityTimeout = 10 * time.Minute
)
var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Skipped]", func() {
@ -60,28 +59,69 @@ var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Skipped]", f
})
})
// NOTE(review): truncated remnant of the pre-refactor scaleUp left behind by
// the diff rendering — it has no closing brace and duplicates the
// parameterized scaleUp defined further below. This stub should be removed.
func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
rc = NewDynamicResourceConsumer(name, kind, 1, 250, 0, 500, 100, f)
// HPAScaleTest describes one HPA lifecycle scenario driven by run(...):
// start at initPods consuming cpuStart, expect firstScale replicas and hold
// for firstScaleStasis, then consume cpuBurst and expect secondScale replicas.
type HPAScaleTest struct {
initPods int // initial replica count of the resource under test
cpuStart int // initial total CPU consumed — presumably millicores; confirm against ResourceConsumer
maxCPU int64 // passed to NewDynamicResourceConsumer — assumes this is the CPU limit; TODO confirm
idealCPU int // HPA target CPU utilization percentage (becomes CPUTargetUtilization.TargetPercentage)
minPods int // HPA MinReplicas
maxPods int // HPA MaxReplicas
firstScale int // replica count expected after the initial load
firstScaleStasis time.Duration // how long firstScale must remain the desired replica count
cpuBurst int // total CPU consumed in the second phase
secondScale int // replica count expected after the burst
}
// run drives a complete HPA lifecycle from a starting state to an expected
// final state. The initial state is defined by initPods; the first state
// change happens when the HPA reacts to the CPU consumed initially, and the
// second when it reacts to the cpuBurst parameter.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (st *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *Framework) {
	rc = NewDynamicResourceConsumer(name, kind, st.initPods, st.cpuStart, 0, st.maxCPU, 100, f)
	defer rc.CleanUp()

	// Install the autoscaler, then verify both state transitions in order.
	createCPUHorizontalPodAutoscaler(rc, st.idealCPU, st.minPods, st.maxPods)
	rc.WaitForReplicas(st.firstScale)
	rc.EnsureDesiredReplicas(st.firstScale, st.firstScaleStasis)
	rc.ConsumeCPU(st.cpuBurst)
	rc.WaitForReplicas(st.secondScale)
}
// scaleUp exercises the HPA growing a workload: one pod under initial load is
// scaled to three and held there, then a CPU burst pushes it to five.
func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
	test := HPAScaleTest{
		initPods:         1,
		cpuStart:         250,
		maxCPU:           500,
		idealCPU:         .2 * 100, // target 20% CPU utilization
		minPods:          1,
		maxPods:          5,
		firstScale:       3,
		firstScaleStasis: 10 * time.Minute,
		cpuBurst:         700,
		secondScale:      5,
	}
	test.run(name, kind, rc, f)
}
// scaleDown exercises the HPA shrinking a workload: five pods under moderate
// load settle at three and hold, then a drop in CPU brings them down to one.
//
// Fix: the diff residue had the pre-refactor body (direct consumer setup and a
// 2-argument createCPUHorizontalPodAutoscaler call referencing the removed
// stabilityTimeout) fused ahead of the new HPAScaleTest body, which neither
// compiles nor matches the refactor — the stale lines are removed.
func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) {
	scaleTest := &HPAScaleTest{
		initPods:         5,
		cpuStart:         400,
		maxCPU:           500,
		idealCPU:         .3 * 100, // target 30% CPU utilization
		minPods:          1,
		maxPods:          5,
		firstScale:       3,
		firstScaleStasis: 10 * time.Minute,
		cpuBurst:         100,
		secondScale:      1,
	}
	scaleTest.run(name, kind, rc, f)
}
func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu int) {
minReplicas := 1
func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int) {
hpa := &extensions.HorizontalPodAutoscaler{
ObjectMeta: api.ObjectMeta{
Name: rc.name,
@ -94,7 +134,7 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu int) {
Subresource: subresource,
},
MinReplicas: &minReplicas,
MaxReplicas: 5,
MaxReplicas: maxRepl,
CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: cpu},
},
}