mirror of https://github.com/k3s-io/k3s
Merge pull request #41475 from jszczepkowski/e2e-upgrade-hpa
Automatic merge from submit-queue (batch tested with PRs 41606, 41475). Added e2e upgrade test for Horizontal Pod Autoscaler.
commit d15d067c79
@@ -13,7 +13,6 @@ go_library(
     srcs = [
         "addon_update.go",
         "apparmor.go",
-        "autoscaling_utils.go",
         "cadvisor.go",
         "cluster_logging_es.go",
         "cluster_logging_gcl.go",
@@ -111,7 +110,6 @@ go_library(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/api/v1/service:go_default_library",
         "//pkg/apis/apps/v1beta1:go_default_library",
-        "//pkg/apis/autoscaling/v1:go_default_library",
         "//pkg/apis/batch:go_default_library",
         "//pkg/apis/batch/v1:go_default_library",
         "//pkg/apis/batch/v2alpha1:go_default_library",
@@ -34,6 +34,7 @@ var upgradeTests = []upgrades.Test{
     &upgrades.SecretUpgradeTest{},
     &upgrades.DeploymentUpgradeTest{},
     &upgrades.ConfigMapUpgradeTest{},
+    &upgrades.HPAUpgradeTest{},
 }

 var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
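For orientation: everything registered in upgradeTests must satisfy the upgrades.Test interface. A minimal sketch of that contract, inferred from the Setup/Test/Teardown methods the new HPAUpgradeTest implements at the end of this diff (the real interface lives in upgrade.go in the upgrades package and may differ in details):

    package upgrades

    import "k8s.io/kubernetes/test/e2e/framework"

    // UpgradeType identifies which component is being upgraded (sketch).
    type UpgradeType int

    // Test is the contract each entry in upgradeTests satisfies; the method
    // set matches the Setup/Test/Teardown implemented by HPAUpgradeTest below.
    type Test interface {
        // Setup runs once before the upgrade starts.
        Setup(f *framework.Framework)
        // Test runs while the upgrade is in progress; done closes when it finishes.
        Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
        // Teardown cleans up after the upgrade completes.
        Teardown(f *framework.Framework)
    }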
@@ -10,6 +10,7 @@ load(
 go_library(
     name = "go_default_library",
     srcs = [
+        "autoscaling_utils.go",
         "configmap.go",
         "container_probe.go",
         "docker_containers.go",
@@ -33,7 +34,9 @@ go_library(
         "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/api/v1/pod:go_default_library",
+        "//pkg/apis/autoscaling/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/client/conditions:go_default_library",
         "//pkg/kubelet:go_default_library",
         "//pkg/kubelet/events:go_default_library",
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package e2e
+package common

 import (
     "context"
@@ -25,6 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/api/v1"
+    autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -53,6 +54,13 @@ const (
     customMetricName = "QPS"
 )

+const (
+    KindRC         = "replicationController"
+    KindDeployment = "deployment"
+    KindReplicaSet = "replicaset"
+    subresource    = "scale"
+)
+
 /*
 ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
 typical use case:
@@ -79,6 +87,10 @@ type ResourceConsumer struct {
     requestSizeCustomMetric int
 }

+func GetResourceConsumerImage() string {
+    return resourceConsumerImage
+}
+
 func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
     return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
         dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f)
@@ -86,7 +98,7 @@ func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initM

 // TODO this still defaults to replication controller
 func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-    return newResourceConsumer(name, kindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
+    return newResourceConsumer(name, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
         initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f)
 }
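Two related changes land here: the kind constants gain exported (capitalized) copies because the consumer now lives in package common, where only uppercase identifiers are visible to other packages, and the constructors switch to the exported KindRC. A hedged sketch of a cross-package call site; note the static variant spreads totals evenly per replica:

    // Sketch of a caller outside package common; f comes from the test.
    func newStaticConsumer(f *framework.Framework) *common.ResourceConsumer {
        // With replicas=2 and initCPUTotal=100, each request asks for
        // 100/2 = 50 millicores (see the per-replica division above).
        return common.NewStaticResourceConsumer("consumer", 2, 100, 200, 0,
            200 /* cpuLimit */, 400 /* memLimit */, f)
    }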
@@ -160,6 +172,7 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
             rc.sendConsumeCPURequest(millicores)
             sleepTime = rc.sleepTime
         case <-rc.stopCPU:
+            framework.Logf("RC %s: stopping CPU consumer", rc.name)
             return
         }
     }
@@ -178,6 +191,7 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
             rc.sendConsumeMemRequest(megabytes)
             sleepTime = rc.sleepTime
         case <-rc.stopMem:
+            framework.Logf("RC %s: stopping mem consumer", rc.name)
             return
         }
     }
@@ -196,6 +210,7 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
             rc.sendConsumeCustomMetric(delta)
             sleepTime = rc.sleepTime
         case <-rc.stopCustomMetric:
+            framework.Logf("RC %s: stopping metric consumer", rc.name)
             return
         }
     }
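The three hunks above instrument the same select-loop shape: each consumer goroutine re-sends its current consumption level on a timer until a value arrives on its stop channel. A condensed sketch of that pattern; the rc.cpu channel name is an assumption, since the diff only shows the timer and stop branches:

    func (rc *ResourceConsumer) makeConsumeCPURequests() {
        defer GinkgoRecover()
        sleepTime := time.Duration(0) // fire immediately on the first iteration
        millicores := 0
        for {
            select {
            case millicores = <-rc.cpu: // assumed channel carrying new targets
                framework.Logf("RC %s: setting CPU consumption to %v millicores", rc.name, millicores)
            case <-time.After(sleepTime):
                rc.sendConsumeCPURequest(millicores)
                sleepTime = rc.sleepTime
            case <-rc.stopCPU:
                framework.Logf("RC %s: stopping CPU consumer", rc.name)
                return
            }
        }
    }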
@@ -263,21 +278,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {

 func (rc *ResourceConsumer) GetReplicas() int {
     switch rc.kind {
-    case kindRC:
+    case KindRC:
         replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
         framework.ExpectNoError(err)
         if replicationController == nil {
             framework.Failf(rcIsNil)
         }
         return int(replicationController.Status.Replicas)
-    case kindDeployment:
+    case KindDeployment:
         deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
         framework.ExpectNoError(err)
         if deployment == nil {
             framework.Failf(deploymentIsNil)
         }
         return int(deployment.Status.Replicas)
-    case kindReplicaSet:
+    case KindReplicaSet:
         rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
         framework.ExpectNoError(err)
         if rs == nil {
@@ -314,6 +329,22 @@ func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, timeout t
     framework.Logf("Number of replicas was stable over %v", timeout)
 }

+// Pause stops background goroutines responsible for consuming resources.
+func (rc *ResourceConsumer) Pause() {
+    By(fmt.Sprintf("HPA pausing RC %s", rc.name))
+    rc.stopCPU <- 0
+    rc.stopMem <- 0
+    rc.stopCustomMetric <- 0
+}
+
+// Resume starts background goroutines responsible for consuming resources.
+func (rc *ResourceConsumer) Resume() {
+    By(fmt.Sprintf("HPA resuming RC %s", rc.name))
+    go rc.makeConsumeCPURequests()
+    go rc.makeConsumeMemRequests()
+    go rc.makeConsumeCustomMetric()
+}
+
 func (rc *ResourceConsumer) CleanUp() {
     By(fmt.Sprintf("Removing consuming RC %s", rc.name))
     close(rc.stopCPU)
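Note the design choice: Pause sends a value on each stop channel instead of closing it. A close (as CleanUp does) is one-shot, while a single send unblocks exactly one select and leaves the channel reusable, so Resume can spin the goroutines back up. A hedged usage sketch; disrupt is a hypothetical placeholder for the master upgrade this PR exercises:

    func pauseAroundDisruption(rc *common.ResourceConsumer, disrupt func()) {
        rc.Pause()  // each consumer goroutine receives a 0, logs, and returns
        disrupt()   // requests to the consumer would fail while this runs
        rc.Resume() // starts three fresh consumer goroutines
    }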
@@ -361,16 +392,16 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
     }

     switch kind {
-    case kindRC:
+    case KindRC:
         framework.ExpectNoError(framework.RunRC(rcConfig))
         break
-    case kindDeployment:
+    case KindDeployment:
         dpConfig := testutils.DeploymentConfig{
             RCConfig: rcConfig,
         }
         framework.ExpectNoError(framework.RunDeployment(dpConfig))
         break
-    case kindReplicaSet:
+    case KindReplicaSet:
         rsConfig := testutils.ReplicaSetConfig{
             RCConfig: rcConfig,
         }
@@ -417,3 +448,23 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
     framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
         c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
 }
+
+func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) {
+    hpa := &autoscalingv1.HorizontalPodAutoscaler{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      rc.name,
+            Namespace: rc.framework.Namespace.Name,
+        },
+        Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
+            ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
+                Kind: rc.kind,
+                Name: rc.name,
+            },
+            MinReplicas:                    &minReplicas,
+            MaxReplicas:                    maxRepl,
+            TargetCPUUtilizationPercentage: &cpu,
+        },
+    }
+    _, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
+    framework.ExpectNoError(errHPA)
+}
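A hedged sketch of how the new helper is used, mirroring the call sites later in this diff: with a 500-millicore limit and a 20% target, the HPA aims for 100 millicores per pod before scaling out.

    func setupAutoscaledConsumer(f *framework.Framework) *common.ResourceConsumer {
        rc := common.NewDynamicResourceConsumer("rc", common.KindRC,
            1 /* replicas */, 250 /* initCPUTotal */, 0, 0,
            500 /* cpuLimit */, 200 /* memLimit */, f)
        // Target 20% CPU utilization across 1 to 5 replicas.
        common.CreateCPUHorizontalPodAutoscaler(rc, 20, 1 /* min */, 5 /* max */)
        return rc
    }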
@@ -19,24 +19,16 @@ package e2e
 import (
     "time"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
+    "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"

     . "github.com/onsi/ginkgo"
 )

-const (
-    kindRC         = "replicationController"
-    kindDeployment = "deployment"
-    kindReplicaSet = "replicaset"
-    subresource    = "scale"
-)
-
 // These tests don't seem to be running properly in parallel: issue: #20338.
 //
 var _ = framework.KubeDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
-    var rc *ResourceConsumer
+    var rc *common.ResourceConsumer
     f := framework.NewDefaultFramework("horizontal-pod-autoscaling")

     titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
@@ -45,20 +37,20 @@ var _ = framework.KubeDescribe("[HPA] Horizontal pod autoscaling (scale resource
     framework.KubeDescribe("[Serial] [Slow] Deployment", func() {
         // CPU tests via deployments
         It(titleUp, func() {
-            scaleUp("test-deployment", kindDeployment, false, rc, f)
+            scaleUp("test-deployment", common.KindDeployment, false, rc, f)
         })
         It(titleDown, func() {
-            scaleDown("test-deployment", kindDeployment, false, rc, f)
+            scaleDown("test-deployment", common.KindDeployment, false, rc, f)
         })
     })

     framework.KubeDescribe("[Serial] [Slow] ReplicaSet", func() {
         // CPU tests via ReplicaSets
         It(titleUp, func() {
-            scaleUp("rs", kindReplicaSet, false, rc, f)
+            scaleUp("rs", common.KindReplicaSet, false, rc, f)
         })
         It(titleDown, func() {
-            scaleDown("rs", kindReplicaSet, false, rc, f)
+            scaleDown("rs", common.KindReplicaSet, false, rc, f)
         })
     })
@@ -66,10 +58,10 @@ var _ = framework.KubeDescribe("[HPA] Horizontal pod autoscaling (scale resource
     framework.KubeDescribe("[Serial] [Slow] ReplicationController", func() {
         // CPU tests via replication controllers
         It(titleUp+" and verify decision stability", func() {
-            scaleUp("rc", kindRC, true, rc, f)
+            scaleUp("rc", common.KindRC, true, rc, f)
         })
         It(titleDown+" and verify decision stability", func() {
-            scaleDown("rc", kindRC, true, rc, f)
+            scaleDown("rc", common.KindRC, true, rc, f)
         })
     })
@@ -84,7 +76,7 @@ var _ = framework.KubeDescribe("[HPA] Horizontal pod autoscaling (scale resource
                 maxPods:    2,
                 firstScale: 2,
             }
-            scaleTest.run("rc-light", kindRC, rc, f)
+            scaleTest.run("rc-light", common.KindRC, rc, f)
         })
         It("Should scale from 2 pods to 1 pod", func() {
             scaleTest := &HPAScaleTest{
|
@ -96,7 +88,7 @@ var _ = framework.KubeDescribe("[HPA] Horizontal pod autoscaling (scale resource
|
||||||
maxPods: 2,
|
maxPods: 2,
|
||||||
firstScale: 1,
|
firstScale: 1,
|
||||||
}
|
}
|
||||||
scaleTest.run("rc-light", kindRC, rc, f)
|
scaleTest.run("rc-light", common.KindRC, rc, f)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
@ -121,10 +113,10 @@ type HPAScaleTest struct {
|
||||||
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
|
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
|
||||||
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
|
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
|
||||||
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
|
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
|
||||||
func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *framework.Framework) {
|
func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
|
||||||
rc = NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
|
rc = common.NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
|
||||||
defer rc.CleanUp()
|
defer rc.CleanUp()
|
||||||
createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
|
common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
|
||||||
rc.WaitForReplicas(int(scaleTest.firstScale))
|
rc.WaitForReplicas(int(scaleTest.firstScale))
|
||||||
if scaleTest.firstScaleStasis > 0 {
|
if scaleTest.firstScaleStasis > 0 {
|
||||||
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
|
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
|
||||||
|
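For reference, the fields driving run are all visible across this diff's hunks (the struct may define more, e.g. a CPU burst for the optional second scale). A sketch of a scale-up configuration matching the "1 pod to 3 pods" title, with illustrative values:

    scaleTest := &HPAScaleTest{
        initPods:                    1,   // start with one pod
        totalInitialCPUUsage:        250, // millicores consumed across all pods
        perPodCPURequest:            500, // millicores requested per pod
        targetCPUUtilizationPercent: 20,  // HPA target: 100m per pod
        minPods:                     1,
        maxPods:                     5,
        firstScale:                  3, // ceil(250 / 100) = 3 replicas expected
    }
    scaleTest.run("rc", common.KindRC, rc, f)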
@@ -135,7 +127,7 @@ func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *f
     }
 }

-func scaleUp(name, kind string, checkStability bool, rc *ResourceConsumer, f *framework.Framework) {
+func scaleUp(name, kind string, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
     stasis := 0 * time.Minute
     if checkStability {
         stasis = 10 * time.Minute
@@ -155,7 +147,7 @@ func scaleUp(name, kind string, checkStability bool, rc *ResourceConsumer, f *fr
     scaleTest.run(name, kind, rc, f)
 }

-func scaleDown(name, kind string, checkStability bool, rc *ResourceConsumer, f *framework.Framework) {
+func scaleDown(name, kind string, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
     stasis := 0 * time.Minute
     if checkStability {
         stasis = 10 * time.Minute
@@ -174,23 +166,3 @@ func scaleDown(name, kind string, checkStability bool, rc *ResourceConsumer, f *
     }
     scaleTest.run(name, kind, rc, f)
 }
-
-func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) {
-    hpa := &autoscaling.HorizontalPodAutoscaler{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      rc.name,
-            Namespace: rc.framework.Namespace.Name,
-        },
-        Spec: autoscaling.HorizontalPodAutoscalerSpec{
-            ScaleTargetRef: autoscaling.CrossVersionObjectReference{
-                Kind: rc.kind,
-                Name: rc.name,
-            },
-            MinReplicas:                    &minReplicas,
-            MaxReplicas:                    maxRepl,
-            TargetCPUUtilizationPercentage: &cpu,
-        },
-    }
-    _, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
-    framework.ExpectNoError(errHPA)
-}
@@ -24,6 +24,7 @@ import (
     . "github.com/onsi/gomega"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -39,13 +40,13 @@ var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Fl
         cpu := 100
         mem := 200
         for i := 0; i < 10; i++ {
-            rc := NewStaticResourceConsumer(fmt.Sprintf("ir-%d", i), 1, cpu, mem, 0, int64(2*cpu), int64(2*mem), f)
+            rc := common.NewStaticResourceConsumer(fmt.Sprintf("ir-%d", i), 1, cpu, mem, 0, int64(2*cpu), int64(2*mem), f)
             defer rc.CleanUp()
         }
         // Wait some time to make sure usage data is gathered.
         time.Sleep(10 * time.Minute)

-        pod := runPod(f, "ir-test", resourceConsumerImage)
+        pod := runPod(f, "ir-test", common.GetResourceConsumerImage())
         r := pod.Spec.Containers[0].Resources.Requests
         Expect(r.Cpu().MilliValue()).Should(BeNumerically("~", cpu, 10))
         Expect(r.Memory().Value()).Should(BeNumerically("~", mem*1024*1024, 20*1024*1024))
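Since resourceConsumerImage is now unexported inside package common, outside callers read it through the accessor added earlier in this diff. A hedged sketch; runPod is a helper local to the e2e package, as the hunk above shows:

    func startConsumerPod(f *framework.Framework) *v1.Pod {
        // GetResourceConsumerImage replaces direct access to the now-unexported
        // resourceConsumerImage constant in package common.
        return runPod(f, "ir-test", common.GetResourceConsumerImage())
    }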
@@ -12,6 +12,7 @@ go_library(
     srcs = [
         "configmaps.go",
         "deployments.go",
+        "horizontal_pod_autoscalers.go",
        "secrets.go",
         "services.go",
         "upgrade.go",
@@ -21,6 +22,7 @@ go_library(
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
+        "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//vendor:github.com/onsi/ginkgo",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -0,0 +1,88 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package upgrades
+
+import (
+    "fmt"
+    "k8s.io/kubernetes/test/e2e/common"
+    "k8s.io/kubernetes/test/e2e/framework"
+
+    . "github.com/onsi/ginkgo"
+)
+
+// HPAUpgradeTest tests that HPA rescales target resource correctly before and after a cluster upgrade.
+type HPAUpgradeTest struct {
+    rc *common.ResourceConsumer
+}
+
+// Setup creates a resource consumer and an HPA object that autoscales the consumer.
+func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
+    t.rc = common.NewDynamicResourceConsumer(
+        "resource-consumer-upgrade-test",
+        common.KindRC,
+        1,   /* replicas */
+        250, /* initCPUTotal */
+        0,
+        0,
+        500, /* cpuLimit */
+        200, /* memLimit */
+        f)
+    common.CreateCPUHorizontalPodAutoscaler(
+        t.rc,
+        20, /* targetCPUUtilizationPercent */
+        1,  /* minPods */
+        5)  /* maxPods */
+
+    t.rc.Pause()
+    t.test()
+}
+
+// Test waits for the upgrade to complete and verifies that HPA still works correctly.
+func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
+    // Block until upgrade is done.
+    By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
+    <-done
+    t.test()
+}
+
+// Teardown cleans up any remaining resources.
+func (t *HPAUpgradeTest) Teardown(f *framework.Framework) {
+    // Rely on the namespace deletion to clean up everything.
+    t.rc.CleanUp()
+}
+
+func (t *HPAUpgradeTest) test() {
+    t.rc.Resume()
+
+    By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
+    t.rc.ConsumeCPU(10) /* millicores */
+    By(fmt.Sprintf("HPA waits for 1 replica"))
+    t.rc.WaitForReplicas(1)
+
+    By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
+    t.rc.ConsumeCPU(250) /* millicores */
+    By(fmt.Sprintf("HPA waits for 3 replicas"))
+    t.rc.WaitForReplicas(3)
+
+    By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
+    t.rc.ConsumeCPU(700) /* millicores */
+    By(fmt.Sprintf("HPA waits for 5 replicas"))
+    t.rc.WaitForReplicas(5)
+
+    // We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.
+    t.rc.Pause()
+}
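The replica counts the test waits for follow the standard HPA rule, replicas = ceil(consumed / perPodTarget) clamped to [min, max], with the per-pod target here being 20% of the 500-millicore limit, i.e. 100 millicores. A quick check of the three stages:

    // Expected replicas for a given total CPU consumption, with a 100m
    // per-pod target, min 1, max 5 (matching the Setup parameters above).
    func expectedReplicas(consumedMillicores int) int {
        const targetPerPod, min, max = 100, 1, 5
        n := (consumedMillicores + targetPerPod - 1) / targetPerPod // ceil division
        if n < min {
            n = min
        }
        if n > max {
            n = max
        }
        return n
    }

    // expectedReplicas(10)  == 1  (ceil(0.1) = 1, already at min)
    // expectedReplicas(250) == 3
    // expectedReplicas(700) == 5  (ceil(7.0) capped at max)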