From 4d154179c441f89cc085b1251e3a7cb76135bab2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?=
Date: Mon, 19 Nov 2018 13:37:50 +0100
Subject: [PATCH] Fix flaky 'shouldn't trigger additional scale-ups during
 processing scale-up' test

---
 test/e2e/autoscaling/cluster_size_autoscaling.go | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index ec1dee63a7..2bf1114608 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -231,8 +231,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 
 			By("Expect no more scale-up to be happening after all pods are scheduled")
-			status, err = getScaleUpStatus(c)
+
+			// wait for a while until scale-up finishes; we cannot read CA status immediately
+			// after pods are scheduled as status config map is updated by CA once every loop iteration
+			status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
+				return s.status == caNoScaleUpStatus
+			}, 2*freshStatusLimit)
 			framework.ExpectNoError(err)
+
 			if status.target != target {
 				glog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
 			}
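
Note on the fix: the flake came from reading the Cluster Autoscaler status immediately after pods were scheduled, even though CA only rewrites its status config map once per loop iteration, so the patch swaps the single getScaleUpStatus read for waitForScaleUpStatus, which polls until a predicate (here, status == caNoScaleUpStatus) holds or 2*freshStatusLimit elapses. The snippet below is a minimal, self-contained sketch of that poll-until-predicate-or-timeout pattern, not the actual helper from test/e2e/autoscaling/cluster_size_autoscaling.go; the config-map read is stubbed out, and the names and values (scaleUpStatus, caNoScaleUpStatus, freshStatusLimit, the 5s poll interval) are assumptions that only mirror the test file for illustration.

// poll_sketch.go: illustrative stand-in for the waitForScaleUpStatus pattern used in the patch.
package main

import (
	"errors"
	"fmt"
	"time"
)

// scaleUpStatus is a simplified stand-in for the struct the e2e test parses
// from the Cluster Autoscaler status config map.
type scaleUpStatus struct {
	status string
	target int
}

// Illustrative values; the real constants live in the e2e test file.
const caNoScaleUpStatus = "NoActivity"
const freshStatusLimit = 20 * time.Second

// waitForScaleUpStatusSketch polls readStatus until cond(status) holds or the
// timeout expires. Errors from individual reads are tolerated until the deadline,
// mirroring the "keep retrying, CA updates its status once per loop" reasoning
// in the patch comment.
func waitForScaleUpStatusSketch(readStatus func() (*scaleUpStatus, error), cond func(*scaleUpStatus) bool, timeout time.Duration) (*scaleUpStatus, error) {
	deadline := time.Now().Add(timeout)
	for {
		s, err := readStatus()
		if err == nil && cond(s) {
			return s, nil
		}
		if time.Now().After(deadline) {
			if err != nil {
				return nil, err
			}
			return s, errors.New("timed out waiting for scale-up status to match condition")
		}
		// Poll coarsely: the status config map is refreshed once per CA loop iteration.
		time.Sleep(5 * time.Second)
	}
}

func main() {
	// Fake status source: pretend the scale-up finishes after a couple of reads.
	reads := 0
	readStatus := func() (*scaleUpStatus, error) {
		reads++
		if reads < 3 {
			return &scaleUpStatus{status: "InProgress", target: 4}, nil
		}
		return &scaleUpStatus{status: caNoScaleUpStatus, target: 4}, nil
	}

	status, err := waitForScaleUpStatusSketch(readStatus, func(s *scaleUpStatus) bool {
		return s.status == caNoScaleUpStatus
	}, 2*freshStatusLimit)
	fmt.Println(status, err)
}

Using 2*freshStatusLimit as the timeout (as the patch does) gives CA at least two full refresh cycles to publish a "no scale-up" status before the test gives up, which is what removes the race the original single read was exposed to.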