mirror of https://github.com/k3s-io/k3s
Merge pull request #71208 from losipiuk/lo/fix-flaky-scalup-test
Fix flaky 'shouldn't trigger additional scale-ups during processing scale-up' test
commit f7b9b669e5
@@ -349,8 +349,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 
 		By("Expect no more scale-up to be happening after all pods are scheduled")
-		status, err = getScaleUpStatus(c)
+
+		// wait for a while until scale-up finishes; we cannot read CA status immediately
+		// after pods are scheduled as status config map is updated by CA once every loop iteration
+		status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
+			return s.status == caNoScaleUpStatus
+		}, 2*freshStatusLimit)
 		framework.ExpectNoError(err)
+
 		if status.target != target {
 			klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
 		}
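For context, the sketch below illustrates the polling pattern that a helper like waitForScaleUpStatus presumably implements: keep re-reading the Cluster Autoscaler status until a condition (here, s.status == caNoScaleUpStatus) holds or a timeout expires, rather than reading the status once immediately after the pods are scheduled. This is a minimal, self-contained assumption-based example, not the actual helper from the Kubernetes e2e framework: the real function takes a clientset and reads the CA status config map, while here the read step is a callback, and the value of caNoScaleUpStatus is invented for illustration.

// Hypothetical, self-contained sketch of the polling pattern behind
// waitForScaleUpStatus. The read step is abstracted into a callback so the
// example compiles on its own; all names other than those visible in the
// diff above are assumptions.
package main

import (
	"fmt"
	"time"
)

// scaleUpStatus mirrors the two fields referenced in the diff; the real
// struct in the e2e suite carries more information.
type scaleUpStatus struct {
	status string
	target int
}

// caNoScaleUpStatus is assumed to be the status string the autoscaler
// reports when no scale-up is in progress (illustrative value only).
const caNoScaleUpStatus = "NoActivity"

// waitForScaleUpStatus re-reads the autoscaler status until cond holds or the
// timeout elapses. Polling is needed because the status config map is only
// refreshed once per autoscaler loop iteration, so a single immediate read
// (the old getScaleUpStatus call) can observe a stale "in progress" value.
func waitForScaleUpStatus(read func() (*scaleUpStatus, error), cond func(*scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) {
	deadline := time.Now().Add(timeout)
	for {
		status, err := read()
		if err == nil && cond(status) {
			return status, nil
		}
		if time.Now().After(deadline) {
			return status, fmt.Errorf("timed out waiting for scale-up status (last error: %v)", err)
		}
		time.Sleep(5 * time.Second)
	}
}

func main() {
	// Usage mirroring the new test code: wait until the autoscaler reports
	// that no scale-up is happening. A stub read function stands in for the
	// real config-map lookup.
	fakeRead := func() (*scaleUpStatus, error) {
		return &scaleUpStatus{status: caNoScaleUpStatus, target: 3}, nil
	}
	status, err := waitForScaleUpStatus(fakeRead, func(s *scaleUpStatus) bool {
		return s.status == caNoScaleUpStatus
	}, 2*time.Minute)
	fmt.Println(status, err)
}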