mirror of https://github.com/k3s-io/k3s
Fix flaky 'shouldn't trigger additional scale-ups during processing scale-up' test
parent
13c80a99a3
commit
4d154179c4
@@ -231,8 +231,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

 		By("Expect no more scale-up to be happening after all pods are scheduled")
-		status, err = getScaleUpStatus(c)
+
+		// wait for a while until scale-up finishes; we cannot read CA status immediately
+		// after pods are scheduled as status config map is updated by CA once every loop iteration
+		status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
+			return s.status == caNoScaleUpStatus
+		}, 2*freshStatusLimit)
 		framework.ExpectNoError(err)
+
 		if status.target != target {
 			glog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
 		}