mirror of https://github.com/k3s-io/k3s
Fix golint failures of test/e2e/autoscaling
parent 81a61ae0e3
commit c101f40e18
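The patch below is mechanical: golint dot-imports of ginkgo/gomega are replaced with qualified imports (so By becomes ginkgo.By and Expect becomes gomega.Expect), initialisms are fully capitalized (nodeCpuMillis becomes nodeCPUMillis, getGkeApiEndpoint becomes getGkeAPIEndpoint), and test/e2e/autoscaling is then dropped from the golint failure whitelist. The standalone sketch below illustrates both patterns; it is not part of the patch, and the names demoCPUMillis, fetchAPIEndpoint, and DEMO_API_ENDPOINT are hypothetical.

package main

import (
	"fmt"
	"os"
)

// golint requires initialisms to be fully capitalized: a name like
// demoCpuMillis would be flagged ("should be demoCPUMillis"), just as
// nodeCpuMillis and getGkeApiEndpoint are flagged in this patch.
func demoCPUMillis(cores int64) int64 {
	return cores * 1000 // one core = 1000 millicores
}

// After a dot-import such as `. "github.com/onsi/ginkgo"` is removed,
// every call site must name the package explicitly (By -> ginkgo.By,
// Expect -> gomega.Expect); the stdlib calls here just stand in for that.
func fetchAPIEndpoint() string {
	// DEMO_API_ENDPOINT is a hypothetical variable used only for illustration.
	endpoint := os.Getenv("DEMO_API_ENDPOINT")
	if endpoint == "" {
		endpoint = "https://example.invalid"
	}
	return endpoint
}

func main() {
	fmt.Println(demoCPUMillis(4), fetchAPIEndpoint())
}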
@@ -590,7 +590,6 @@ staging/src/k8s.io/sample-apiserver/pkg/apis/wardle
 staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
-test/e2e/autoscaling
 test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/lifecycle/bootstrap

@@ -25,15 +25,15 @@ import (
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
 f := framework.NewDefaultFramework("autoscaling")

 SIGDescribe("Autoscaling a service", func() {
-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
 _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
 if err != nil {
@@ -41,12 +41,12 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 }
 })

-Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
+ginkgo.Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
 const nodesNum = 3 // Expect there to be 3 nodes before and after the test.
 var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
 var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes.

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // Make sure there is only 1 node group, otherwise this test becomes useless.
 nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
 if len(nodeGroups) != 1 {
@@ -64,10 +64,10 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 // Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
 nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 schedulableCount := len(nodes.Items)
-Expect(schedulableCount).To(Equal(nodeGroupSize), "not all nodes are schedulable")
+gomega.Expect(schedulableCount).To(gomega.Equal(nodeGroupSize), "not all nodes are schedulable")
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 // Attempt cleanup only if a node group was targeted for scale up.
 // Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
 if len(nodeGroupName) > 0 {
@@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 }
 })

-Measure("takes less than 15 minutes", func(b Benchmarker) {
+ginkgo.Measure("takes less than 15 minutes", func(b ginkgo.Benchmarker) {
 // Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient.
 const timeToWait = 15 * time.Minute

@@ -85,8 +85,8 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 // This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
 // Make it so that 'nodesNum' pods fit perfectly per node.
 nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
-nodeCpuMillis := (&nodeCpus).MilliValue()
-cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
+nodeCPUMillis := (&nodeCpus).MilliValue()
+cpuRequestMillis := int64(nodeCPUMillis / nodesNum)

 // Start the service we want to scale and wait for it to be up and running.
 nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
@@ -99,10 +99,10 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"

 // Enable Horizontal Pod Autoscaler with 50% target utilization and
 // scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
-targetCpuUtilizationPercent := int32(50)
-hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCpuUtilizationPercent, 1, 10)
+targetCPUUtilizationPercent := int32(50)
+hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
 defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
-cpuLoad := 8 * cpuRequestMillis * int64(targetCpuUtilizationPercent) / 100 // 8 pods utilized to the target level
+cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
 resourceConsumer.ConsumeCPU(int(cpuLoad))

 // Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each.

@@ -33,8 +33,8 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 "k8s.io/klog"
 )

@@ -65,7 +65,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 var originalSizes map[string]int
 var sum int

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke", "kubemark")

 // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
@@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 size, err := framework.GroupSize(mig)
 framework.ExpectNoError(err)
-By(fmt.Sprintf("Initial size of %s: %d", mig, size))
+ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
 originalSizes[mig] = size
 sum += size
 }
@@ -91,13 +91,13 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun

 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 nodeCount = len(nodes.Items)
-Expect(nodeCount).NotTo(BeZero())
+gomega.Expect(nodeCount).NotTo(gomega.BeZero())
 cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
 mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
 coresPerNode = int((&cpu).MilliValue() / 1000)
 memCapacityMb = int((&mem).Value() / 1024 / 1024)

-Expect(nodeCount).Should(Equal(sum))
+gomega.Expect(nodeCount).Should(gomega.Equal(sum))

 if framework.ProviderIs("gke") {
 val, err := isAutoscalerEnabled(3)
@@ -109,8 +109,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 }
 })

-AfterEach(func() {
-By(fmt.Sprintf("Restoring initial size of the cluster"))
+ginkgo.AfterEach(func() {
+ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
 setMigSizes(originalSizes)
 framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
 nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
 })

-It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
+ginkgo.It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
 perNodeReservation := int(float64(memCapacityMb) * 0.95)
 replicasPerNode := 10

@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 defer testCleanup()
 })

-It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
+ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
 perNodeReservation := int(float64(memCapacityMb) * 0.95)
 replicasPerNode := 10
 additionalNodes1 := int(math.Ceil(0.7 * maxNodes))
@@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 klog.Infof("Scaled up twice")
 })

-It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
+ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
 perNodeReservation := int(float64(memCapacityMb) * 0.7)
 replicas := int(math.Ceil(maxNodes * 0.7))
 totalNodes := maxNodes
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 }, scaleDownTimeout))
 })

-It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
+ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
 perPodReservation := int(float64(memCapacityMb) * 0.01)
 // underutilizedNodes are 10% full
 underutilizedPerNodeReplicas := 10
@@ -291,7 +291,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 }, timeout))
 })

-It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
+ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
 fullReservation := int(float64(memCapacityMb) * 0.9)
 hostPortPodReservation := int(float64(memCapacityMb) * 0.3)
 totalNodes := maxNodes
@@ -307,28 +307,28 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 fullNodesCount := divider
 underutilizedNodesCount := totalNodes - fullNodesCount

-By("Reserving full nodes")
+ginkgo.By("Reserving full nodes")
 // run RC1 w/o host port
 cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2)
 defer cleanup()

-By("Reserving host ports on remaining nodes")
+ginkgo.By("Reserving host ports on remaining nodes")
 // run RC2 w/ host port
 cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
 defer cleanup2()

 waitForAllCaPodsReadyInNamespace(f, c)
 // wait and check scale down doesn't occur
-By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
+ginkgo.By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
 time.Sleep(scaleDownTimeout)

-By("Checking if the number of nodes is as expected")
+ginkgo.By("Checking if the number of nodes is as expected")
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
-Expect(len(nodes.Items)).Should(Equal(totalNodes))
+gomega.Expect(len(nodes.Items)).Should(gomega.Equal(totalNodes))
 })

-Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
+ginkgo.Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
 // Start a number of pods saturating existing nodes.
 perNodeReservation := int(float64(memCapacityMb) * 0.80)
 replicasPerNode := 10
@@ -348,7 +348,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

 // Ensure that no new nodes have been added so far.
-Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
+gomega.Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(gomega.Equal(nodeCount))

 // Start a number of schedulable pods to ensure CA reacts.
 additionalNodes := maxNodes - nodeCount
@@ -375,7 +375,7 @@ func anyKey(input map[string]int) string {
 func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error {
 // resize cluster to start size
 // run rc based on config
-By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
+ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
 start := time.Now()
 framework.ExpectNoError(framework.RunRC(*config.extraPods))
 // check results
@@ -461,7 +461,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
 }

 func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error {
-By(fmt.Sprintf("Running RC which reserves host port and memory"))
+ginkgo.By(fmt.Sprintf("Running RC which reserves host port and memory"))
 request := int64(1024 * 1024 * megabytes / replicas)
 config := &testutils.RCConfig{
 Client: f.ClientSet,

@@ -48,8 +48,8 @@ import (
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 "k8s.io/klog"
 )

@@ -94,7 +94,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 var memAllocatableMb int
 var originalSizes map[string]int

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 c = f.ClientSet
 framework.SkipUnlessProviderIs("gce", "gke")

@@ -103,7 +103,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
 size, err := framework.GroupSize(mig)
 framework.ExpectNoError(err)
-By(fmt.Sprintf("Initial size of %s: %d", mig, size))
+ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
 originalSizes[mig] = size
 sum += size
 }
@@ -117,12 +117,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 quantity := node.Status.Allocatable[v1.ResourceCPU]
 coreCount += quantity.Value()
 }
-By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
-Expect(nodeCount).NotTo(BeZero())
+ginkgo.By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
+gomega.Expect(nodeCount).NotTo(gomega.BeZero())
 mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
 memAllocatableMb = int((&mem).Value() / 1024 / 1024)

-Expect(nodeCount).Should(Equal(sum))
+gomega.Expect(nodeCount).Should(gomega.Equal(sum))

 if framework.ProviderIs("gke") {
 val, err := isAutoscalerEnabled(5)
@@ -134,9 +134,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 framework.SkipUnlessProviderIs("gce", "gke")
-By(fmt.Sprintf("Restoring initial size of the cluster"))
+ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
 setMigSizes(originalSizes)
 expectedNodes := 0
 for _, size := range originalSizes {
@@ -163,29 +163,29 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
 })

-It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-By("Creating unschedulable pod")
+ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.By("Creating unschedulable pod")
 ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

-By("Waiting for scale up hoping it won't happen")
+ginkgo.By("Waiting for scale up hoping it won't happen")
 // Verify that the appropriate event was generated
 eventFound := false
 EventsLoop:
 for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
-By("Waiting for NotTriggerScaleUp event")
+ginkgo.By("Waiting for NotTriggerScaleUp event")
 events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
 framework.ExpectNoError(err)

 for _, e := range events.Items {
 if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
-By("NotTriggerScaleUp event found")
+ginkgo.By("NotTriggerScaleUp event found")
 eventFound = true
 break EventsLoop
 }
 }
 }
-Expect(eventFound).Should(Equal(true))
+gomega.Expect(eventFound).Should(gomega.Equal(true))
 // Verify that cluster size is not changed
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size <= nodeCount }, time.Second))
@@ -201,12 +201,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 }

-It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
+ginkgo.It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
 func() { simpleScaleUpTest(0) })

 gpuType := os.Getenv("TESTED_GPU_TYPE")

-It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
+ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
 framework.Failf("TEST_GPU_TYPE not defined")
@@ -219,21 +219,21 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 installNvidiaDriversDaemonSet()

-By("Enable autoscaler")
+ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))

-By("Schedule a pod which requires GPU")
+ginkgo.By("Schedule a pod which requires GPU")
 framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))
 })

-It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
+ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
 framework.Failf("TEST_GPU_TYPE not defined")
@@ -246,24 +246,24 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 installNvidiaDriversDaemonSet()

-By("Schedule a single pod which requires GPU")
+ginkgo.By("Schedule a single pod which requires GPU")
 framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

-By("Enable autoscaler")
+ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
 defer disableAutoscaler(gpuPoolName, 0, 2)
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))

-By("Scale GPU deployment")
+ginkgo.By("Scale GPU deployment")
 framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)

 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(2))
 })

-It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
+ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
 framework.Failf("TEST_GPU_TYPE not defined")
@@ -276,12 +276,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 installNvidiaDriversDaemonSet()

-By("Enable autoscaler")
+ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))

-By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
+ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
 // Verify that cluster size is increased
@@ -289,10 +289,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

 // Expect gpu pool to stay intact
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
 })

-It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
+ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 framework.SkipUnlessProviderIs("gke")
 if gpuType == "" {
 framework.Failf("TEST_GPU_TYPE not defined")
@@ -305,29 +305,29 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 installNvidiaDriversDaemonSet()

-By("Schedule a single pod which requires GPU")
+ginkgo.By("Schedule a single pod which requires GPU")
 framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

-By("Enable autoscaler")
+ginkgo.By("Enable autoscaler")
 framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
 defer disableAutoscaler(gpuPoolName, 0, 1)
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(1))

-By("Remove the only POD requiring GPU")
+ginkgo.By("Remove the only POD requiring GPU")
 framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount }, scaleDownTimeout))
-Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
+gomega.Expect(len(getPoolNodes(f, gpuPoolName))).Should(gomega.Equal(0))
 })

-It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
+ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
 func() {
 framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
 })

-It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 // Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info.
 status, err := waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
 return s.ready == s.target && s.ready <= nodeCount
@@ -336,7 +336,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 unmanagedNodes := nodeCount - status.ready

-By("Schedule more pods than can fit and wait for cluster to scale-up")
+ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up")
 ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

@@ -347,7 +347,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 target := status.target
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

-By("Expect no more scale-up to be happening after all pods are scheduled")
+ginkgo.By("Expect no more scale-up to be happening after all pods are scheduled")

 // wait for a while until scale-up finishes; we cannot read CA status immediately
 // after pods are scheduled as status config map is updated by CA once every loop iteration
@@ -359,16 +359,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 if status.target != target {
 klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
 }
-Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false))
-Expect(status.status).Should(Equal(caNoScaleUpStatus))
-Expect(status.ready).Should(Equal(status.target))
-Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(Equal(status.target + unmanagedNodes))
+gomega.Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(gomega.Equal(false))
+gomega.Expect(status.status).Should(gomega.Equal(caNoScaleUpStatus))
+gomega.Expect(status.ready).Should(gomega.Equal(status.target))
+gomega.Expect(len(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items)).Should(gomega.Equal(status.target + unmanagedNodes))
 })

-It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 framework.SkipUnlessProviderIs("gke")

-By("Creating new node-pool with n1-standard-4 machines")
+ginkgo.By("Creating new node-pool with n1-standard-4 machines")
 const extraPoolName = "extra-pool"
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
@@ -379,16 +379,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
 klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")

-By("Getting memory available on new nodes, so we can account for it when creating RC")
+ginkgo.By("Getting memory available on new nodes, so we can account for it when creating RC")
 nodes := getPoolNodes(f, extraPoolName)
-Expect(len(nodes)).Should(Equal(extraNodes))
+gomega.Expect(len(nodes)).Should(gomega.Equal(extraNodes))
 extraMemMb := 0
 for _, node := range nodes {
 mem := node.Status.Allocatable[v1.ResourceMemory]
 extraMemMb += int((&mem).Value() / 1024 / 1024)
 }

-By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
+ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
 totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
 ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
@@ -399,10 +399,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 })

-It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 framework.SkipUnlessProviderIs("gke")

-By("Creating new node-pool with n1-standard-4 machines")
+ginkgo.By("Creating new node-pool with n1-standard-4 machines")
 const extraPoolName = "extra-pool"
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
@@ -412,7 +412,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
 })

-It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")

@@ -421,18 +421,18 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 })

-It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 pods := nodeCount
 newPods := 2
 labels := map[string]string{
 "anti-affinity": "yes",
 }
-By("starting a pod with anti-affinity on each node")
+ginkgo.By("starting a pod with anti-affinity on each node")
 framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

-By("scheduling extra pods with anti-affinity to existing ones")
+ginkgo.By("scheduling extra pods with anti-affinity to existing ones")
 framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")

@@ -440,8 +440,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
 })

-It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-By("creating pods")
+ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.By("creating pods")
 pods := nodeCount
 newPods := 1
 labels := map[string]string{
@@ -450,10 +450,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")

-By("waiting for all pods before triggering scale up")
+ginkgo.By("waiting for all pods before triggering scale up")
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

-By("creating a pod requesting EmptyDir")
+ginkgo.By("creating a pod requesting EmptyDir")
 framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")

@@ -461,7 +461,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
 })

-It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 framework.SkipUnlessProviderIs("gce", "gke")

 volumeLabels := labels.Set{
@@ -469,7 +469,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 selector := metav1.SetAsLabelSelector(volumeLabels)

-By("creating volume & pvc")
+ginkgo.By("creating volume & pvc")
 diskName, err := framework.CreatePDWithRetry()
 framework.ExpectNoError(err)
 pvConfig := framework.PersistentVolumeConfig{
@@ -505,7 +505,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 }()

-By("creating pods")
+ginkgo.By("creating pods")
 pods := nodeCount
 labels := map[string]string{
 "anti-affinity": "yes",
@@ -516,10 +516,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 klog.Infof("RC and pods not using volume deleted")
 }()

-By("waiting for all pods before triggering scale up")
+ginkgo.By("waiting for all pods before triggering scale up")
 framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

-By("creating a pod requesting PVC")
+ginkgo.By("creating a pod requesting PVC")
 pvcPodName := "pvc-pod"
 newPods := 1
 volumes := buildVolumes(pv, pvc)
@@ -533,11 +533,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
 })

-It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 labelKey := "cluster-autoscaling-test.special-node"
 labelValue := "true"

-By("Finding the smallest MIG")
+ginkgo.By("Finding the smallest MIG")
 minMig := ""
 minSize := nodeCount
 for mig, size := range originalSizes {
@@ -557,7 +557,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }

 removeLabels := func(nodesToClean sets.String) {
-By("Removing labels from nodes")
+ginkgo.By("Removing labels from nodes")
 for node := range nodesToClean {
 framework.RemoveLabelOffNode(c, node, labelKey)
 }
@@ -567,7 +567,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(err)
 nodesSet := sets.NewString(nodes...)
 defer removeLabels(nodesSet)
-By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
+ginkgo.By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))

 for node := range nodesSet {
 framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
@@ -575,7 +575,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 scheduling.CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)

-By("Waiting for new node to appear and annotating it")
+ginkgo.By("Waiting for new node to appear and annotating it")
 framework.WaitForGroupSize(minMig, int32(minSize+1))
 // Verify that cluster size is increased
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -586,7 +586,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 newNodesSet := sets.NewString(newNodes...)
 newNodesSet.Delete(nodes...)
 if len(newNodesSet) > 1 {
-By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
+ginkgo.By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
 klog.Infof("Usually only 1 new node is expected, investigating")
 klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
 if output, err := exec.Command("gcloud", "compute", "instances", "list",
@@ -612,7 +612,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // However at this moment we DO WANT it to crash so that we don't check all test runs for the
 // rare behavior, but only the broken ones.
 }
-By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
+ginkgo.By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
 registeredNodes := sets.NewString()
 for nodeName := range newNodesSet {
 node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
@@ -622,7 +622,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 klog.Errorf("Failed to get node %v: %v", nodeName, err)
 }
 }
-By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
+ginkgo.By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
 for node := range registeredNodes {
 framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
 }
@@ -633,10 +633,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
 })

-It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 framework.SkipUnlessProviderIs("gke")

-By("Creating new node-pool with n1-standard-4 machines")
+ginkgo.By("Creating new node-pool with n1-standard-4 machines")
 const extraPoolName = "extra-pool"
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
@@ -647,7 +647,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 extraPods := extraNodes + 1
 totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
-By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
+ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
 ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)

@@ -663,7 +663,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer cleanup()
 framework.ExpectNoError(err)

-By("Manually increase cluster size")
+ginkgo.By("Manually increase cluster size")
 increasedSize := 0
 newSizes := make(map[string]int)
 for key, val := range originalSizes {
@@ -674,20 +674,20 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
 func(size int) bool { return size >= increasedSize }, manualResizeTimeout, unready))

-By("Some node should be removed")
+ginkgo.By("Some node should be removed")
 framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
 func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready))
 }

-It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
+ginkgo.It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]",
 func() { simpleScaleDownTest(0) })

-It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
+ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
 func() {
 framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
 })

-It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
 framework.SkipUnlessProviderIs("gke")

 increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
@@ -700,7 +700,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size >= increasedSize+extraNodes }, scaleUpTimeout))

-By("Some node should be removed")
+ginkgo.By("Some node should be removed")
 // Apparently GKE master is restarted couple minutes after the node pool is added
 // reseting all the timers in scale down code. Adding 10 extra minutes to workaround
 // this issue.
@@ -709,44 +709,44 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute))
 })

-It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
 runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
-By("Some node should be removed")
+ginkgo.By("Some node should be removed")
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size < increasedSize }, scaleDownTimeout))
 })
 })

-It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
 runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
-By("No nodes should be removed")
+ginkgo.By("No nodes should be removed")
 time.Sleep(scaleDownTimeout)
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-Expect(len(nodes.Items)).Should(Equal(increasedSize))
+gomega.Expect(len(nodes.Items)).Should(gomega.Equal(increasedSize))
 })
 })

-It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
 runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
-By("Some node should be removed")
+ginkgo.By("Some node should be removed")
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size < increasedSize }, scaleDownTimeout))
 })
 })

-It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
 runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
-By("Some node should be removed")
+ginkgo.By("Some node should be removed")
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size < increasedSize }, scaleDownTimeout))
 })
 })

-It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func() {
 // Provider-specific setup
 if framework.ProviderIs("gke") {
 // GKE-specific setup
-By("Add a new node pool with 0 nodes and min size 0")
+ginkgo.By("Add a new node pool with 0 nodes and min size 0")
 const extraPoolName = "extra-pool"
 addNodePool(extraPoolName, "n1-standard-4", 0)
 defer deleteNodePool(extraPoolName)
@@ -756,7 +756,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // on GCE, run only if there are already at least 2 node groups
 framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")

-By("Manually scale smallest node group to 0")
+ginkgo.By("Manually scale smallest node group to 0")
 minMig := ""
 minSize := nodeCount
 for mig, size := range originalSizes {
@@ -769,7 +769,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
 }

-By("Make remaining nodes unschedulable")
+ginkgo.By("Make remaining nodes unschedulable")
 nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
 "spec.unschedulable": "false",
 }.AsSelector().String()})
@@ -785,7 +785,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(err)
 }

-By("Run a scale-up test")
+ginkgo.By("Run a scale-up test")
 ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

@@ -807,7 +807,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // verify the targeted node pool/MIG is of size 0
 gkeScaleToZero := func() {
 // GKE-specific setup
-By("Add a new node pool with size 1 and min size 0")
+ginkgo.By("Add a new node pool with size 1 and min size 0")
 const extraPoolName = "extra-pool"
 addNodePool(extraPoolName, "n1-standard-4", 1)
 defer deleteNodePool(extraPoolName)
@@ -817,9 +817,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer disableAutoscaler(extraPoolName, 0, 1)

 ngNodes := getPoolNodes(f, extraPoolName)
-Expect(len(ngNodes)).To(Equal(extraNodes))
+gomega.Expect(len(ngNodes)).To(gomega.Equal(extraNodes))
 for _, node := range ngNodes {
-By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
+ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
 }

 for _, node := range ngNodes {
@@ -830,12 +830,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 // GKE-specific check
 newSize := getPoolSize(f, extraPoolName)
-Expect(newSize).Should(Equal(0))
+gomega.Expect(newSize).Should(gomega.Equal(0))
 }

 gceScaleToZero := func() {
 // non-GKE only
-By("Find smallest node group and manually scale it to a single node")
+ginkgo.By("Find smallest node group and manually scale it to a single node")
 minMig := ""
 minSize := nodeCount
 for mig, size := range originalSizes {
@@ -848,9 +848,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
 ngNodes, err := framework.GetGroupNodes(minMig)
 framework.ExpectNoError(err)
-Expect(len(ngNodes) == 1).To(BeTrue())
+gomega.Expect(len(ngNodes) == 1).To(gomega.BeTrue())
 node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
-By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
+ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
 framework.ExpectNoError(err)

 // this part is identical
@@ -861,10 +861,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // non-GKE only
 newSize, err := framework.GroupSize(minMig)
 framework.ExpectNoError(err)
-Expect(newSize).Should(Equal(0))
+gomega.Expect(newSize).Should(gomega.Equal(0))
 }

-It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
 if framework.ProviderIs("gke") { // In GKE, we can just add a node pool
 gkeScaleToZero()
 } else if len(originalSizes) >= 2 {
@@ -874,7 +874,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 }
 })

-It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
 clusterSize := nodeCount
 for clusterSize < unhealthyClusterThreshold+1 {
 clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
@@ -893,13 +893,13 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 // making no assumptions about minimal node startup time.
 time.Sleep(2 * time.Minute)

-By("Block network connectivity to some nodes to simulate unhealthy cluster")
+ginkgo.By("Block network connectivity to some nodes to simulate unhealthy cluster")
 nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
 nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
 "spec.unschedulable": "false",
 }.AsSelector().String()})
 framework.ExpectNoError(err)
-Expect(nodesToBreakCount <= len(nodes.Items)).To(BeTrue())
+gomega.Expect(nodesToBreakCount <= len(nodes.Items)).To(gomega.BeTrue())
 nodesToBreak := nodes.Items[:nodesToBreakCount]

 // TestUnderTemporaryNetworkFailure only removes connectivity to a single node,
@@ -917,11 +917,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 time.Sleep(scaleUpTimeout)
 currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
 e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
-Expect(len(currentNodes.Items)).Should(Equal(len(nodes.Items) - nodesToBreakCount))
+gomega.Expect(len(currentNodes.Items)).Should(gomega.Equal(len(nodes.Items) - nodesToBreakCount))
 status, err := getClusterwideStatus(c)
 e2elog.Logf("Clusterwide status: %v", status)
 framework.ExpectNoError(err)
-Expect(status).Should(Equal("Unhealthy"))
+gomega.Expect(status).Should(gomega.Equal("Unhealthy"))
 }
 }
 testFunction()
@@ -929,19 +929,19 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
 })

-It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 defer createPriorityClasses(f)()
 // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
 cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
 defer cleanupFunc()
-By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
+ginkgo.By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
 time.Sleep(scaleUpTimeout)
 // Verify that cluster size is not changed
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount }, time.Second))
 })

-It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 defer createPriorityClasses(f)()
 // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
 cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@@ -951,7 +951,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 func(size int) bool { return size > nodeCount }, time.Second))
 })

-It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 defer createPriorityClasses(f)()
 // Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
 cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
@@ -963,24 +963,24 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 func(size int) bool { return size == nodeCount }, time.Second))
 })

-It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
 defer createPriorityClasses(f)()
 increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
 // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
 cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, expendablePriorityClassName)
 defer cleanupFunc()
-By("Waiting for scale down")
+ginkgo.By("Waiting for scale down")
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == nodeCount }, scaleDownTimeout))
 })

-It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
+ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
 defer createPriorityClasses(f)()
 increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
 // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
 cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", increasedSize, int(float64(increasedSize)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
 defer cleanupFunc()
-By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
+ginkgo.By(fmt.Sprintf("Waiting for scale down hoping it won't happen, sleep for %s", scaleDownTimeout.String()))
 time.Sleep(scaleDownTimeout)
 framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 func(size int) bool { return size == increasedSize }, time.Second))
@@ -988,7 +988,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})

func installNvidiaDriversDaemonSet() {
By("Add daemonset which installs nvidia drivers")
ginkgo.By("Add daemonset which installs nvidia drivers")
// the link differs from one in GKE documentation; discussed with @mindprince this one should be used
framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
}
@@ -1012,7 +1012,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str

defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")

By("Create a PodDisruptionBudget")
ginkgo.By("Create a PodDisruptionBudget")
minAvailable := intstr.FromInt(numPods - pdbSize)
pdb := &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
@@ -1034,15 +1034,15 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
verifyFunction(increasedSize)
}

func getGkeApiEndpoint() string {
gkeApiEndpoint := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER")
if gkeApiEndpoint == "" {
gkeApiEndpoint = "https://test-container.sandbox.googleapis.com"
func getGkeAPIEndpoint() string {
gkeAPIEndpoint := os.Getenv("CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER")
if gkeAPIEndpoint == "" {
gkeAPIEndpoint = "https://test-container.sandbox.googleapis.com"
}
if strings.HasSuffix(gkeApiEndpoint, "/") {
gkeApiEndpoint = gkeApiEndpoint[:len(gkeApiEndpoint)-1]
if strings.HasSuffix(gkeAPIEndpoint, "/") {
gkeAPIEndpoint = gkeAPIEndpoint[:len(gkeAPIEndpoint)-1]
}
return gkeApiEndpoint
return gkeAPIEndpoint
}

func getGKEURL(apiVersion string, suffix string) string {
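The hunk above applies golint's initialism convention: Api becomes API, and later in the diff Id becomes ID, keeping the whole initialism in a single case. A hedged illustration with made-up names, not from this diff:

// Hypothetical names showing golint's initialism rule.
var serverApiUrl string // golint: ApiUrl should be APIURL
var serverAPIURL string // accepted

func nodeId() string { return "" } // golint: Id should be ID
func nodeID() string { return "" } // accepted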
@@ -1051,7 +1051,7 @@ func getGKEURL(apiVersion string, suffix string) string {
token := strings.Replace(string(out), "\n", "", -1)

return fmt.Sprintf("%s/%s/%s?access_token=%s",
getGkeApiEndpoint(),
getGkeAPIEndpoint(),
apiVersion,
suffix,
token)
@@ -1064,12 +1064,11 @@ func getGKEClusterURL(apiVersion string) string {
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Region,
framework.TestContext.CloudConfig.Cluster))
} else {
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster))
}
return getGKEURL(apiVersion, fmt.Sprintf("projects/%s/zones/%s/clusters/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster))
}

func getCluster(apiVersion string) (string, error) {
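This hunk and the next one fix golint's "if block ends with a return statement, so drop this else" warning: when the if branch returns, the else block is redundant and its body outdents. The shape of the fix, sketched with hypothetical names:

// Hypothetical sketch of the else-after-return fix.
func location(regional bool, region, zone string) string {
	if regional {
		return "--region=" + region
	}
	return "--zone=" + zone // previously wrapped in else { ... }
}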
@@ -1107,9 +1106,8 @@ func isAutoscalerEnabled(expectedMaxNodeCountInTargetPool int) (bool, error) {
func getClusterLocation() string {
if isRegionalCluster() {
return "--region=" + framework.TestContext.CloudConfig.Region
} else {
return "--zone=" + framework.TestContext.CloudConfig.Zone
}
return "--zone=" + framework.TestContext.CloudConfig.Zone
}

func getGcloudCommandFromTrack(commandTrack string, args []string) []string {
@@ -1248,7 +1246,7 @@ func getPoolInitialSize(poolName string) int {
klog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
Expect(len(fields)).Should(Equal(1))
gomega.Expect(len(fields)).Should(gomega.Equal(1))
size, err := strconv.ParseInt(fields[0], 10, 64)
framework.ExpectNoError(err)
@@ -1274,7 +1272,7 @@ func getPoolSize(f *framework.Framework, poolName string) int {
}

func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
ginkgo.By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
@@ -1311,7 +1309,7 @@ func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, mega
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}

// ReserveMemoryWithSelector creates a replication controller with pods with node selector that, in summation,
// ReserveMemoryWithSelectorAndTolerations creates a replication controller with pods with node selector that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
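The comment edit above is golint's exported-comment rule at work: a doc comment on an exported identifier must begin with that identifier's exact name, so a stale comment left over from a rename is flagged. The same rule drives the brand-new doc comments added further down for CustomMetricTestCase, Run, DNSParamsLinear, and SIGDescribe. A small hypothetical example of the accepted form:

// PoolSize reports the configured pool size. This passes golint because
// the comment opens with the function's own name; a free-form sentence
// such as "returns the pool size" would be flagged.
func PoolSize() int { return 0 }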
@@ -1418,7 +1416,7 @@ func setMigSizes(sizes map[string]int) bool {
currentSize, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
if desiredSize != currentSize {
By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
ginkgo.By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
err = framework.ResizeGroup(mig, int32(desiredSize))
framework.ExpectNoError(err)
madeChanges = true
@@ -1428,10 +1426,10 @@ func setMigSizes(sizes map[string]int) bool {
}

func drainNode(f *framework.Framework, node *v1.Node) {
By("Make the single node unschedulable")
ginkgo.By("Make the single node unschedulable")
makeNodeUnschedulable(f.ClientSet, node)

By("Manually drain the single node")
ginkgo.By("Manually drain the single node")
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err)
@@ -1442,7 +1440,7 @@ func drainNode(f *framework.Framework, node *v1.Node) {
}

func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
By(fmt.Sprintf("Taint node %s", node.Name))
ginkgo.By(fmt.Sprintf("Taint node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
@@ -1479,7 +1477,7 @@ func (CriticalAddonsOnlyError) Error() string {
}

func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
By(fmt.Sprintf("Remove taint from node %s", node.Name))
ginkgo.By(fmt.Sprintf("Remove taint from node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
@@ -1634,7 +1632,7 @@ func buildAntiAffinity(labels map[string]string) *v1.Affinity {
// 3a. enable scheduling on that node
// 3b. increase number of replicas in RC by podsPerNode
func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error {
By("Run a pod on each node")
ginkgo.By("Run a pod on each node")
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
@@ -1709,7 +1707,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
// Increase cluster size by newNodesForScaledownTests to create some unused nodes
// that can be later removed by cluster autoscaler.
func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[string]int) int {
By("Manually increase cluster size")
ginkgo.By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
@@ -1857,13 +1855,13 @@ func waitForScaleUpStatus(c clientset.Interface, cond func(s *scaleUpStatus) boo
// This is a temporary fix to allow CA to migrate some kube-system pods
// TODO: Remove this when the PDB is added for some of those components
func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")
ginkgo.By("Create PodDisruptionBudgets for kube-system components, so they can be migrated if required")

var newPdbs []string
cleanup := func() {
var finalErr error
for _, newPdbName := range newPdbs {
By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
@@ -1888,7 +1886,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
{label: "glbc", minAvailable: 0},
}
for _, pdbData := range pdbsToAdd {
By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
ginkgo.By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
labelMap := map[string]string{"k8s-app": pdbData.label}
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
minAvailable := intstr.FromInt(pdbData.minAvailable)
@@ -1922,7 +1920,7 @@ func createPriorityClasses(f *framework.Framework) func() {
if err != nil {
klog.Errorf("Error creating priority class: %v", err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.Equal(true))
}

return func() {
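As an aside that is not part of this commit: gomega ships a dedicated BeTrue matcher, so the assertion above could also be written without Equal(true), which tends to read better in failure output:

gomega.Expect(err == nil || errors.IsAlreadyExists(err)).To(gomega.BeTrue())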

@@ -33,7 +33,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
"golang.org/x/oauth2/google"
)
@@ -45,13 +45,13 @@ const (
)

var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})

f := framework.NewDefaultFramework("horizontal-pod-autoscaling")

It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
@@ -66,7 +66,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
tc.Run()
})

It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
@@ -83,7 +83,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
tc.Run()
})

It("should scale down with External Metric with target value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale down with External Metric with target value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
// metric should cause scale down
metricValue := externalMetricValue
@@ -106,7 +106,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
tc.Run()
})

It("should scale down with External Metric with target average value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale down with External Metric with target average value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
// metric should cause scale down
metricValue := externalMetricValue
@@ -129,7 +129,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
tc.Run()
})

It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
@@ -144,7 +144,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
tc.Run()
})

It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 1
// metric 1 would cause a scale down, if not for metric 2
metric1Value := int64(100)
@@ -175,7 +175,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
tc.Run()
})

It("should scale up with two External metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
ginkgo.It("should scale up with two External metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
initialReplicas := 1
// metric 1 would cause a scale down, if not for metric 2
metric1Value := externalMetricValue
@@ -216,6 +216,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
})
})

// CustomMetricTestCase is a struct for test cases.
type CustomMetricTestCase struct {
framework *framework.Framework
hpa *as.HorizontalPodAutoscaler
@@ -226,8 +227,9 @@ type CustomMetricTestCase struct {
scaledReplicas int
}

// Run starts test case.
func (tc *CustomMetricTestCase) Run() {
projectId := framework.TestContext.CloudConfig.ProjectID
projectID := framework.TestContext.CloudConfig.ProjectID

ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
@@ -251,11 +253,11 @@ func (tc *CustomMetricTestCase) Run() {
}

// Set up a cluster: create a custom metric and set up k8s-sd adapter
err = monitoring.CreateDescriptors(gcmService, projectId)
err = monitoring.CreateDescriptors(gcmService, projectID)
if err != nil {
framework.Failf("Failed to create metric descriptor: %v", err)
}
defer monitoring.CleanupDescriptors(gcmService, projectId)
defer monitoring.CleanupDescriptors(gcmService, projectID)

err = monitoring.CreateAdapter(monitoring.AdapterDefault)
if err != nil {

@@ -31,10 +31,11 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

// Constants used in dns-autoscaling test.
const (
DNSdefaultTimeout = 5 * time.Minute
ClusterAddonLabelKey = "k8s-app"
@@ -47,18 +48,18 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
var c clientset.Interface
var previousParams map[string]string
var originDNSReplicasCount int
var DNSParams_1 DNSParamsLinear
var DNSParams_2 DNSParamsLinear
var DNSParams_3 DNSParamsLinear
var DNSParams1 DNSParamsLinear
var DNSParams2 DNSParamsLinear
var DNSParams3 DNSParamsLinear

BeforeEach(func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
c = f.ClientSet

nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items)
Expect(nodeCount).NotTo(BeZero())
gomega.Expect(nodeCount).NotTo(gomega.BeZero())

By("Collecting original replicas count and DNS scaling params")
ginkgo.By("Collecting original replicas count and DNS scaling params")
var err error
originDNSReplicasCount, err = getDNSReplicas(c)
framework.ExpectNoError(err)
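Here golint's var-naming check fires for a second reason: Go identifiers use mixedCaps, never underscores, so DNSParams_1 becomes DNSParams1. A hypothetical one-liner of the rule:

var retryCount_2 int // golint: don't use underscores in Go names
var retryCount2 int  // accepted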
@@ -68,13 +69,13 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
previousParams = pcm.Data

if nodeCount <= 500 {
DNSParams_1 = DNSParamsLinear{
DNSParams1 = DNSParamsLinear{
nodesPerReplica: 1,
}
DNSParams_2 = DNSParamsLinear{
DNSParams2 = DNSParamsLinear{
nodesPerReplica: 2,
}
DNSParams_3 = DNSParamsLinear{
DNSParams3 = DNSParamsLinear{
nodesPerReplica: 3,
coresPerReplica: 3,
}
@@ -84,13 +85,13 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
// The default setup is: 256 cores/replica, 16 nodes/replica.
// With nodeCount > 500, nodes/13, nodes/14, nodes/15 and nodes/16
// are different numbers.
DNSParams_1 = DNSParamsLinear{
DNSParams1 = DNSParamsLinear{
nodesPerReplica: 13,
}
DNSParams_2 = DNSParamsLinear{
DNSParams2 = DNSParamsLinear{
nodesPerReplica: 14,
}
DNSParams_3 = DNSParamsLinear{
DNSParams3 = DNSParamsLinear{
nodesPerReplica: 15,
coresPerReplica: 15,
}
@@ -99,25 +100,25 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {

// This test is separated because it is slow and need to run serially.
// Will take around 5 minutes to run on a 4 nodes cluster.
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
numNodes, err := framework.NumberOfRegisteredNodes(c)
framework.ExpectNoError(err)

By("Replace the dns autoscaling parameters with testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
framework.ExpectNoError(err)
defer func() {
By("Restoring initial dns autoscaling parameters")
ginkgo.By("Restoring initial dns autoscaling parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))
framework.ExpectNoError(err)

By("Wait for number of running and ready kube-dns pods recover")
ginkgo.By("Wait for number of running and ready kube-dns pods recover")
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
_, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
framework.ExpectNoError(err)
}()
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)
@@ -125,11 +126,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := framework.GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
ginkgo.By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
}

By("Manually increase cluster size")
ginkgo.By("Manually increase cluster size")
increasedSizes := make(map[string]int)
for key, val := range originalSizes {
increasedSizes[key] = val + 1
@@ -139,87 +140,88 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout)
framework.ExpectNoError(err)

By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)

By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
framework.ExpectNoError(err)

By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)

By("Restoring cluster size")
ginkgo.By("Restoring cluster size")
setMigSizes(originalSizes)
err = framework.WaitForReadyNodes(c, numNodes, scaleDownTimeout)
framework.ExpectNoError(err)

By("Wait for kube-dns scaled to expected number")
ginkgo.By("Wait for kube-dns scaled to expected number")
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)
})

// TODO: Get rid of [DisabledForLargeClusters] tag when issue #55779 is fixed.
It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
ginkgo.It("[DisabledForLargeClusters] kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {

By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
ginkgo.By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
framework.ExpectNoError(err)
defer func() {
By("Restoring initial dns autoscaling parameters")
ginkgo.By("Restoring initial dns autoscaling parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))
framework.ExpectNoError(err)
}()
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)

By("--- Scenario: should scale kube-dns based on changed parameters ---")
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
ginkgo.By("--- Scenario: should scale kube-dns based on changed parameters ---")
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams3)))
framework.ExpectNoError(err)
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams3)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)

By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
By("Delete the ConfigMap for autoscaler")
ginkgo.By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
ginkgo.By("Delete the ConfigMap for autoscaler")
err = deleteDNSScalingConfigMap(c)
framework.ExpectNoError(err)

By("Wait for the ConfigMap got re-created")
ginkgo.By("Wait for the ConfigMap got re-created")
_, err = waitForDNSConfigMapCreated(c, DNSdefaultTimeout)
framework.ExpectNoError(err)

By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_2)))
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams2)))
framework.ExpectNoError(err)
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_2)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams2)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)

By("--- Scenario: should recover after autoscaler pod got deleted ---")
By("Delete the autoscaler pod for kube-dns")
ginkgo.By("--- Scenario: should recover after autoscaler pod got deleted ---")
ginkgo.By("Delete the autoscaler pod for kube-dns")
err = deleteDNSAutoscalerPod(c)
framework.ExpectNoError(err)

By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
ginkgo.By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams1)))
framework.ExpectNoError(err)
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
ginkgo.By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams1)
err = waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)
framework.ExpectNoError(err)
})
})

// DNSParamsLinear is a struct for number of DNS pods.
type DNSParamsLinear struct {
nodesPerReplica float64
coresPerReplica float64

@@ -18,6 +18,7 @@ package autoscaling

import "github.com/onsi/ginkgo"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-autoscaling] "+text, body)
}
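The SIGDescribe wrapper above is what every suite in this diff calls at the top level; it simply prefixes the SIG label onto an ordinary ginkgo.Describe. A hedged usage sketch, where the spec body is hypothetical:

// Hypothetical usage of the wrapper defined above.
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
	ginkgo.It("scales the node group", func() {
		// ... assertions via gomega ...
	})
})
// Ginkgo then reports the spec under "[sig-autoscaling] Cluster size autoscaling [Slow]".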

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)

// These tests don't seem to be running properly in parallel: issue: #20338.
@@ -37,20 +37,20 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu

SIGDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
It(titleUp, func() {
ginkgo.It(titleUp, func() {
scaleUp("test-deployment", common.KindDeployment, false, rc, f)
})
It(titleDown, func() {
ginkgo.It(titleDown, func() {
scaleDown("test-deployment", common.KindDeployment, false, rc, f)
})
})

SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via ReplicaSets
It(titleUp, func() {
ginkgo.It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
})
It(titleDown, func() {
ginkgo.It(titleDown, func() {
scaleDown("rs", common.KindReplicaSet, false, rc, f)
})
})
@@ -58,16 +58,16 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
// These tests take ~20 minutes each.
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
It(titleUp+" and verify decision stability", func() {
ginkgo.It(titleUp+" and verify decision stability", func() {
scaleUp("rc", common.KindRC, true, rc, f)
})
It(titleDown+" and verify decision stability", func() {
ginkgo.It(titleDown+" and verify decision stability", func() {
scaleDown("rc", common.KindRC, true, rc, f)
})
})

SIGDescribe("ReplicationController light", func() {
It("Should scale from 1 pod to 2 pods", func() {
ginkgo.It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
@@ -79,7 +79,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
}
scaleTest.run("rc-light", common.KindRC, rc, f)
})
It("Should scale from 2 pods to 1 pod", func() {
ginkgo.It("Should scale from 2 pods to 1 pod", func() {
scaleTest := &HPAScaleTest{
initPods: 2,
totalInitialCPUUsage: 50,