Merge pull request #15144 from gmarek/predicates

Check for number of scheduled, not running nodes in scheduler_predicates test
pull/6/head
Marek Grabowski 2015-10-06 11:24:47 +02:00
commit bc61bcc1ad
1 changed file with 25 additions and 22 deletions

View File

@ -32,13 +32,13 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
// Returns a number of currently running and not running Pods. // Returns a number of currently scheduled and not scheduled Pods.
func getPodsNumbers(pods *api.PodList) (runningPods, notRunningPods int) { func getPodsScheduledNumbers(pods *api.PodList) (scheduledPods, notScheduledPods int) {
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.Status.Phase == api.PodRunning { if pod.Spec.NodeName != "" {
runningPods += 1 scheduledPods += 1
} else { } else {
notRunningPods += 1 notScheduledPods += 1
} }
} }
return return
@ -49,7 +49,7 @@ func getPodsNumbers(pods *api.PodList) (runningPods, notRunningPods int) {
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) { func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err) expectNoError(err)
podsRunningBefore, _ := getPodsNumbers(allPods) podsScheduledBefore, _ := getPodsScheduledNumbers(allPods)
for i := 0; i < replicas; i++ { for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i) podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
@ -66,25 +66,25 @@ func startPods(c *client.Client, replicas int, ns string, podNamePrefix string,
// completely broken vs. running slowly. // completely broken vs. running slowly.
timeout := 10 * time.Minute timeout := 10 * time.Minute
startTime := time.Now() startTime := time.Now()
currentlyRunningPods := 0 currentlyScheduledPods := 0
for podsRunningBefore+replicas != currentlyRunningPods { for podsScheduledBefore+replicas != currentlyScheduledPods {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err) expectNoError(err)
runningPods := 0 scheduledPods := 0
for _, pod := range allPods.Items { for _, pod := range allPods.Items {
if pod.Status.Phase == api.PodRunning { if pod.Spec.NodeName != "" {
runningPods += 1 scheduledPods += 1
} }
} }
currentlyRunningPods = runningPods currentlyScheduledPods = scheduledPods
Logf("%v pods running", currentlyRunningPods) Logf("%v pods running", currentlyScheduledPods)
if startTime.Add(timeout).Before(time.Now()) { if startTime.Add(timeout).Before(time.Now()) {
Logf("Timed out after %v waiting for pods to start running.", timeout) Logf("Timed out after %v waiting for pods to start running.", timeout)
break break
} }
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
} }
Expect(currentlyRunningPods).To(Equal(podsRunningBefore + replicas)) Expect(currentlyScheduledPods).To(Equal(podsScheduledBefore + replicas))
} }
func getRequestedCPU(pod api.Pod) int64 { func getRequestedCPU(pod api.Pod) int64 {
@ -95,10 +95,10 @@ func getRequestedCPU(pod api.Pod) int64 {
return result return result
} }
func verifyResult(c *client.Client, podName string, ns string, oldNotRunning int) { func verifyResult(c *client.Client, podName string, ns string, oldNotScheduled int) {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err) expectNoError(err)
_, notRunningPods := getPodsNumbers(allPods) _, notScheduledPods := getPodsScheduledNumbers(allPods)
schedEvents, err := c.Events(ns).List( schedEvents, err := c.Events(ns).List(
labels.Everything(), labels.Everything(),
@ -121,7 +121,7 @@ func verifyResult(c *client.Client, podName string, ns string, oldNotRunning int
} }
} }
Expect(notRunningPods).To(Equal(1+oldNotRunning), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods))) Expect(notScheduledPods).To(Equal(1+oldNotScheduled), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods))) Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods)))
} }
@ -147,6 +147,9 @@ var _ = Describe("SchedulerPredicates", func() {
framework.beforeEach() framework.beforeEach()
c = framework.Client c = framework.Client
ns = framework.Namespace.Name ns = framework.Namespace.Name
var err error
nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err)
}) })
AfterEach(func() { AfterEach(func() {
@ -174,8 +177,8 @@ var _ = Describe("SchedulerPredicates", func() {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err) expectNoError(err)
currentlyRunningPods, currentlyDeadPods := getPodsNumbers(allPods) currentlyScheduledPods, currentlyNotScheduledPods := getPodsScheduledNumbers(allPods)
podsNeededForSaturation := int(totalPodCapacity) - currentlyRunningPods podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation)) By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
@ -221,7 +224,7 @@ var _ = Describe("SchedulerPredicates", func() {
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
verifyResult(c, podName, ns, currentlyDeadPods) verifyResult(c, podName, ns, currentlyNotScheduledPods)
cleanupPods(c, ns) cleanupPods(c, ns)
}) })
@ -323,7 +326,7 @@ var _ = Describe("SchedulerPredicates", func() {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err) expectNoError(err)
_, currentlyDeadPods := getPodsNumbers(allPods) _, currentlyNotScheduledPods := getPodsScheduledNumbers(allPods)
_, err = c.Pods(ns).Create(&api.Pod{ _, err = c.Pods(ns).Create(&api.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
@ -351,7 +354,7 @@ var _ = Describe("SchedulerPredicates", func() {
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
verifyResult(c, podName, ns, currentlyDeadPods) verifyResult(c, podName, ns, currentlyNotScheduledPods)
cleanupPods(c, ns) cleanupPods(c, ns)
}) })