/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// getPodsScheduled returns the Pods that are currently scheduled and those that are not scheduled yet.
func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
	for _, pod := range pods.Items {
		if pod.Spec.NodeName != "" {
			scheduledPods = append(scheduledPods, pod)
		} else {
			notScheduledPods = append(notScheduledPods, pod)
		}
	}
	return
}

// Simplified version of RunRC that does not create an RC, but creates plain Pods instead.
// It requires passing a whole Pod definition, which is needed to test various Scheduler predicates.
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	podsScheduledBefore, _ := getPodsScheduled(allPods)

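	// Create the requested number of copies of the given pod, giving each a unique name,
	// "name" label, and container name derived from podNamePrefix.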
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.Spec.Containers[0].Name = podName
		_, err = c.Pods(ns).Create(&pod)
		expectNoError(err)
	}

	// Wait for pods to start running. Note: this is a functional
	// test, not a performance test, so the timeout needs to be
	// sufficiently long that it's only triggered if things are
	// completely broken vs. running slowly.
	timeout := 10 * time.Minute
	startTime := time.Now()
	currentlyScheduledPods := 0
	for len(podsScheduledBefore)+replicas != currentlyScheduledPods {
		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		scheduledPods := 0
		for _, pod := range allPods.Items {
			if pod.Spec.NodeName != "" {
				scheduledPods += 1
			}
		}
		currentlyScheduledPods = scheduledPods
		Logf("%v pods running", currentlyScheduledPods)
		if startTime.Add(timeout).Before(time.Now()) {
			Logf("Timed out after %v waiting for pods to start running.", timeout)
			break
		}
		time.Sleep(5 * time.Second)
	}
	Expect(currentlyScheduledPods).To(Equal(len(podsScheduledBefore) + replicas))
}
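
// getRequestedCPU returns the sum of the containers' CPU limits, in millicores
// (limits are used as the requested capacity here).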
func getRequestedCPU(pod api.Pod) int64 {
	var result int64
	for _, container := range pod.Spec.Containers {
		result += container.Resources.Limits.Cpu().MilliValue()
	}
	return result
}
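
// verifyResult checks that exactly one Pod beyond the oldNotScheduled baseline is left unscheduled
// and that the scheduler reported a FailedScheduling event for the given Pod.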
func verifyResult(c *client.Client, podName string, ns string, oldNotScheduled int) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	scheduledPods, notScheduledPods := getPodsScheduled(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector())
	expectNoError(err)

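	// printOnce returns its argument only on the first call, so the potentially large pod dumps
	// below are attached to at most one failed assertion.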
	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(len(notScheduledPods)).To(Equal(1+oldNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
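
// cleanupPods deletes every Pod in the given namespace with a zero grace period.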
func cleanupPods(c *client.Client, ns string) {
	By("Removing all pods in namespace " + ns)
	pods, err := c.Pods(ns).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	opt := api.NewDeleteOptions(0)
	for _, p := range pods.Items {
		expectNoError(c.Pods(ns).Delete(p.ObjectMeta.Name, opt))
	}
}

var _ = Describe("SchedulerPredicates", func() {
	framework := Framework{BaseName: "sched-pred"}
	var c *client.Client
	var nodeList *api.NodeList
	var totalPodCapacity int64
	var RCName string
	var ns string

	BeforeEach(func() {
		framework.beforeEach()
		c = framework.Client
		ns = framework.Namespace.Name
		var err error
		nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
	})

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := DeleteRC(c, ns, RCName)
			expectNoError(err)
		}
		framework.afterEach()
	})

	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check
	// whether max-pods is working, we need to fully saturate the cluster and keep it in this state for a few seconds.
	It("validates MaxPods limit number of pods that are allowed to run", func() {
		totalPodCapacity = 0

		for _, node := range nodeList.Items {
			podCapacity, found := node.Status.Capacity["pods"]
			Expect(found).To(Equal(true))
			totalPodCapacity += podCapacity.Value()
			Logf("Node: %v", node)
		}

		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		currentlyScheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods)
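		// Saturating the cluster means starting enough extra pods to reach the total pod capacity,
		// on top of the pods that are already scheduled (for example, cluster add-ons).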
		podsNeededForSaturation := int(totalPodCapacity) - len(currentlyScheduledPods)

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

		startPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   "",
				Labels: map[string]string{"name": ""},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "",
						Image: "beta.gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})

		podName := "additional-pod"
		_, err = c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": "additional"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "beta.gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})
		expectNoError(err)
		// Wait a bit to allow the scheduler to do its thing.
		// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
		Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
		time.Sleep(10 * time.Second)

		verifyResult(c, podName, ns, len(currentlyNotScheduledPods))
		cleanupPods(c, ns)
	})

	// This test verifies that we don't allow scheduling pods in a way that the sum of the pods' limits
	// exceeds a machine's capacity.
	// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
	// This is because we need precise control over what's running in the cluster.
	It("validates resource limits of pods that are allowed to run [Conformance]", func() {
		nodeToCapacityMap := make(map[string]int64)
		for _, node := range nodeList.Items {
			capacity, found := node.Status.Capacity["cpu"]
			Expect(found).To(Equal(true))
			nodeToCapacityMap[node.Name] = capacity.MilliValue()
		}

		pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		var currentlyDeadPods int
		for _, pod := range pods.Items {
			_, found := nodeToCapacityMap[pod.Spec.NodeName]
			Expect(found).To(Equal(true))
			if pod.Status.Phase == api.PodRunning {
				Logf("Pod %v requesting capacity %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
				nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
			} else {
				currentlyDeadPods += 1
			}
		}

		var podsNeededForSaturation int
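		// Each filler pod below requests 500 millicores of CPU; count how many such pods still fit
		// into the remaining (unrequested) capacity of every node.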
		milliCpuPerPod := int64(500)
		for name, leftCapacity := range nodeToCapacityMap {
			Logf("Node: %v has capacity: %v", name, leftCapacity)
			podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod)
		}

		By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation))

		startPods(c, podsNeededForSaturation, ns, "overcommit", api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   "",
				Labels: map[string]string{"name": ""},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "",
						Image: "beta.gcr.io/google_containers/pause:2.0",
						Resources: api.ResourceRequirements{
							Limits: api.ResourceList{
								"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
							},
						},
					},
				},
			},
		})

		podName := "additional-pod"
		_, err = c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": "additional"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "beta.gcr.io/google_containers/pause:2.0",
						Resources: api.ResourceRequirements{
							Limits: api.ResourceList{
								"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
							},
						},
					},
				},
			},
		})
		expectNoError(err)
		// Wait a bit to allow the scheduler to do its thing.
		// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
		Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
		time.Sleep(10 * time.Second)

		verifyResult(c, podName, ns, currentlyDeadPods)
		cleanupPods(c, ns)
	})

	// Test nodes do not have the label used below, hence it should be impossible to schedule a Pod
	// with a nonempty Selector set.
	It("validates that NodeSelector is respected if not matching [Conformance]", func() {
		By("Trying to schedule Pod with nonempty NodeSelector.")
		podName := "restricted-pod"

		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		_, currentlyNotScheduledPods := getPodsScheduled(allPods)

		_, err = c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   podName,
				Labels: map[string]string{"name": "restricted"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "beta.gcr.io/google_containers/pause:2.0",
					},
				},
				NodeSelector: map[string]string{
					"label": "nonempty",
				},
			},
		})
		expectNoError(err)
		// Wait a bit to allow the scheduler to do its thing.
		// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
		Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
		time.Sleep(10 * time.Second)

		verifyResult(c, podName, ns, len(currentlyNotScheduledPods))
		cleanupPods(c, ns)
	})

	It("validates that NodeSelector is respected if matching [Conformance]", func() {
		// launch a pod to find a node which can launch a pod. We intentionally do
		// not just take the node list and choose the first of them. Depending on the
		// cluster and the scheduler it might be that a "normal" pod cannot be
		// scheduled onto it.
		By("Trying to launch a pod without a label to get a node which can launch it.")
		podName := "without-label"
		_, err := c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name: podName,
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "beta.gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})
		expectNoError(err)
		expectNoError(waitForPodRunningInNamespace(c, podName, ns))
		pod, err := c.Pods(ns).Get(podName)
		expectNoError(err)

		nodeName := pod.Spec.NodeName
		err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
		expectNoError(err)

		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID()))
		v := "42"
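		// Attach the generated label to the node with a JSON merge patch against the Node object.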
		patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
		expectNoError(err)

		node, err := c.Nodes().Get(nodeName)
		expectNoError(err)
		Expect(node.Labels[k]).To(Equal(v))

		By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		_, err = c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name: labelPodName,
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  labelPodName,
						Image: "beta.gcr.io/google_containers/pause:2.0",
					},
				},
				NodeSelector: map[string]string{
					"kubernetes.io/hostname": nodeName,
					k: v,
				},
			},
		})
		expectNoError(err)
		defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
		expectNoError(waitForPodRunningInNamespace(c, labelPodName, ns))
		labelPod, err := c.Pods(ns).Get(labelPodName)
		expectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})
})