diff --git a/test/e2e/density.go b/test/e2e/density.go
index 99e3a1c604..329498192a 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -158,6 +158,11 @@ var _ = framework.KubeDescribe("Density", func() {
 		c = f.Client
 		ns = f.Namespace.Name
 
+		// In large clusters we may get to this point but still have a bunch
+		// of nodes without Routes created. Since this would make a node
+		// unschedulable, we need to wait until all of them are schedulable.
+		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
+
 		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 00b5a4b118..6af23b2a62 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -2596,6 +2596,17 @@ func waitListSchedulableNodesOrDie(c *client.Client) *api.NodeList {
 	return nodes
 }
 
+// Node is schedulable if:
+// 1) it doesn't have the "unschedulable" field set
+// 2) its Ready condition is set to true
+// 3) it doesn't have the NetworkUnavailable condition set to true
+func isNodeSchedulable(node *api.Node) bool {
+	nodeReady := IsNodeConditionSetAsExpected(node, api.NodeReady, true)
+	networkReady := IsNodeConditionUnset(node, api.NodeNetworkUnavailable) ||
+		IsNodeConditionSetAsExpected(node, api.NodeNetworkUnavailable, false)
+	return !node.Spec.Unschedulable && nodeReady && networkReady
+}
+
 // GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
 // 1) Needs to be schedulable.
 // 2) Needs to be ready.
@@ -2605,14 +2616,32 @@ func GetReadySchedulableNodesOrDie(c *client.Client) (nodes *api.NodeList) {
 	// previous tests may have cause failures of some nodes. Let's skip
 	// 'Not Ready' nodes, just in case (there is no need to fail the test).
 	FilterNodes(nodes, func(node api.Node) bool {
-		nodeReady := IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
-		networkReady := IsNodeConditionUnset(&node, api.NodeNetworkUnavailable) ||
-			IsNodeConditionSetAsExpected(&node, api.NodeNetworkUnavailable, false)
-		return !node.Spec.Unschedulable && nodeReady && networkReady
+		return isNodeSchedulable(&node)
 	})
 	return nodes
 }
 
+func WaitForAllNodesSchedulable(c *client.Client) error {
+	return wait.PollImmediate(30*time.Second, 2*time.Hour, func() (bool, error) {
+		opts := api.ListOptions{
+			ResourceVersion: "0",
+			FieldSelector:   fields.Set{"spec.unschedulable": "false"}.AsSelector(),
+		}
+		nodes, err := c.Nodes().List(opts)
+		if err != nil {
+			Logf("Unexpected error listing nodes: %v", err)
+			// Ignore the error here - it will be retried.
+			return false, nil
+		}
+		for _, node := range nodes.Items {
+			if !isNodeSchedulable(&node) {
+				return false, nil
+			}
+		}
+		return true, nil
+	})
+}
+
 func ScaleRC(c *client.Client, ns, name string, size uint, wait bool) error {
 	By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
 	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
diff --git a/test/e2e/load.go b/test/e2e/load.go
index a9cc468e7c..26410e1f4f 100644
--- a/test/e2e/load.go
+++ b/test/e2e/load.go
@@ -82,6 +82,11 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 	BeforeEach(func() {
 		c = f.Client
 
+		// In large clusters we may get to this point but still have a bunch
+		// of nodes without Routes created. Since this would make a node
+		// unschedulable, we need to wait until all of them are schedulable.
+		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
+
 		ns = f.Namespace.Name
 		nodes := framework.GetReadySchedulableNodesOrDie(c)
 		nodeCount = len(nodes.Items)
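
For readers who want to see the retry semantics in isolation, here is a minimal, self-contained sketch of the poll-until-all-schedulable pattern that `WaitForAllNodesSchedulable` relies on. It runs against fake data rather than a live cluster: `fakeNode`, `listNodes`, and `pollUntil` are hypothetical stand-ins for `api.Node`, `c.Nodes().List`, and `wait.PollImmediate`, and the millisecond durations replace the real 30-second interval and 2-hour timeout.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// fakeNode mirrors the three properties that isNodeSchedulable inspects.
type fakeNode struct {
	name               string
	unschedulable      bool // node.Spec.Unschedulable
	ready              bool // the NodeReady condition
	networkUnavailable bool // the NodeNetworkUnavailable condition
}

func isSchedulable(n fakeNode) bool {
	return !n.unschedulable && n.ready && !n.networkUnavailable
}

// pollUntil re-implements the shape of wait.PollImmediate: run the
// condition once immediately, then on every tick, until it returns
// true, it returns an error, or the timeout expires.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	// Simulate routes being created over time: node-2 becomes
	// schedulable only on the third poll.
	polls := 0
	listNodes := func() []fakeNode {
		polls++
		return []fakeNode{
			{name: "node-1", ready: true},
			{name: "node-2", ready: true, networkUnavailable: polls < 3},
		}
	}

	err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
		for _, n := range listNodes() {
			if !isSchedulable(n) {
				fmt.Printf("node %s not schedulable yet\n", n.name)
				// Returning (false, nil) retries on the next tick; the
				// real helper swallows List errors the same way.
				return false, nil
			}
		}
		return true, nil
	})
	fmt.Println("done, err =", err)
}
```

Note how a `(false, nil)` return keeps the poll going while `(true, nil)` ends it, which is why the real helper deliberately logs and discards transient `List` errors instead of failing the test.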