mirror of https://github.com/k3s-io/k3s
Merge pull request #52650 from aleksandra-malinowska/autoscaling-test-allocatable
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

In cluster size autoscaling tests, use allocatable instead of capacity for node memory. This makes the cluster size autoscaling e2e tests use node allocatable resources when computing how much memory we need to consume in order to trigger scale-up or prevent scale-down. It should fix the failing tests in GKE.
commit ab9838b779
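For context on the diff below: a node's allocatable memory is its capacity minus the resources reserved for system daemons and the kubelet, so pods can never be scheduled against full capacity; on GKE the gap is large enough to throw off tests sized from capacity. A minimal client-go sketch (illustrative only, not part of this PR; the List signature matches client-go of this era, newer versions also take a context) that prints both values per node:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default location (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Newer client-go versions take a context.Context as the first argument.
	nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		capMem := n.Status.Capacity[v1.ResourceMemory]
		allocMem := n.Status.Allocatable[v1.ResourceMemory]
		// Allocatable <= Capacity; the difference is reserved for the
		// system and the kubelet and is never schedulable.
		fmt.Printf("%s: capacity=%dMi allocatable=%dMi\n",
			n.Name, capMem.Value()/1024/1024, allocMem.Value()/1024/1024)
	}
}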
@@ -79,8 +79,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	f := framework.NewDefaultFramework("autoscaling")
 	var c clientset.Interface
 	var nodeCount int
 	var coresPerNode int
-	var memCapacityMb int
+	var memAllocatableMb int
 	var originalSizes map[string]int

 	BeforeEach(func() {
@@ -103,10 +102,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		nodeCount = len(nodes.Items)
 		By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
 		Expect(nodeCount).NotTo(BeZero())
 		cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
-		mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
 		coresPerNode = int((&cpu).MilliValue() / 1000)
-		memCapacityMb = int((&mem).Value() / 1024 / 1024)
+		mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
+		memAllocatableMb = int((&mem).Value() / 1024 / 1024)

 		Expect(nodeCount).Should(Equal(sum))
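A note on the conversion pattern in this hunk: resource.Quantity.Value() returns the amount in base units, which for memory is bytes, hence the two divisions by 1024 to get MiB; the map value is copied into a local variable first because Value() has a pointer receiver and a map index expression is not addressable. A tiny self-contained illustration (the quantity string is made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "3840Mi" is an arbitrary example, not a value from the PR.
	mem := resource.MustParse("3840Mi")

	// Value() reports bytes for memory quantities, so dividing by 1024
	// twice mirrors the test's int((&mem).Value() / 1024 / 1024).
	fmt.Println(mem.Value() / 1024 / 1024) // 3840
}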
@@ -150,7 +147,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		By("Creating unschedulable pod")
-		ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memCapacityMb)), false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		By("Waiting for scale up hoping it won't happen")
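ReserveMemory is a helper from the e2e test framework; judging from its call sites it runs a replication controller of pause pods whose memory requests add up to the requested megabytes, so a request above a node's allocatable memory stays pending. A hedged standalone sketch of that pattern (the helper's real implementation lives in the framework; the image, names, and the era-specific Create signature here are assumptions):

package reserve

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// reservePauseRC approximates what a ReserveMemory-style helper does: run
// `replicas` pause pods whose memory requests sum to totalMb, forcing the
// scheduler to find (or the autoscaler to add) that much allocatable memory.
func reservePauseRC(client kubernetes.Interface, ns, name string, replicas, totalMb int) error {
	perPod := fmt.Sprintf("%dMi", totalMb/replicas)
	n := int32(replicas)
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &n,
			Selector: map[string]string{"name": name},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"name": name}},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:  name,
						Image: "k8s.gcr.io/pause:3.1", // placeholder image
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceMemory: resource.MustParse(perPod),
							},
						},
					}},
				},
			},
		},
	}
	// Newer client-go versions also take a context and CreateOptions here.
	_, err := client.CoreV1().ReplicationControllers(ns).Create(rc)
	return err
}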
@@ -177,7 +174,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	simpleScaleUpTest := func(unready int) {
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, 1*time.Second)
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		// Verify that cluster size is increased
@@ -200,7 +197,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		unmanagedNodes := nodeCount - status.ready

 		By("Schedule more pods than can fit and wait for cluster to scale-up")
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, 1*time.Second)
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		status, err = waitForScaleUpStatus(c, caOngoingScaleUpStatus, scaleUpTriggerTimeout)
@@ -235,7 +232,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		extraMem := nodes[0].Status.Capacity[v1.ResourceMemory]
 		extraMemMb := int((&extraMem).Value() / 1024 / 1024)

-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb+extraMemMb, false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb+extraMemMb, false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		// Verify, that cluster size is increased
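The sizing trick in this hunk: reserving nodeCount*memAllocatableMb+extraMemMb asks for one extra node's worth of memory beyond what the current nodes can hold, so at least one scale-up is guaranteed. With made-up illustrative numbers:

package main

import "fmt"

func main() {
	// Made-up numbers purely to show the arithmetic, not test values.
	nodeCount := 3            // current schedulable nodes
	memAllocatableMb := 12000 // allocatable memory per node, MB
	extraMemMb := 12500       // roughly one node's worth, MB

	requested := nodeCount*memAllocatableMb + extraMemMb // 48500 MB
	available := nodeCount * memAllocatableMb            // 36000 MB

	// The request cannot fit on the existing nodes, so the cluster
	// autoscaler must add at least one more node.
	fmt.Println(requested > available) // true
}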
@@ -480,7 +477,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))

 		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-		ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memCapacityMb)), false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memAllocatableMb)), false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

 		// Apparently GKE master is restarted couple minutes after the node pool is added
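Why 2.1x here: assuming the total reservation is split evenly across the 2 pods, each requests about 1.05x a default-pool node's allocatable memory, too big for any default-pool node while still fitting the larger extra-pool machines. Illustrative arithmetic (numbers are made up):

package main

import "fmt"

func main() {
	// Made-up default-pool allocatable, MB.
	memAllocatableMb := 12000

	totalMb := int(2.1 * float64(memAllocatableMb)) // 25200
	perPodMb := totalMb / 2                         // 12600

	// Each pod asks for more than a default-pool node can offer.
	fmt.Println(perPodMb > memAllocatableMb) // true
}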
@@ -683,7 +680,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			nodesToBreak = nodesToBreak[1:]
 			framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
 		} else {
-			ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, defaultTimeout)
+			ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
 			defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
			time.Sleep(scaleUpTimeout)
			currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)