2015-08-27 17:18:06 +00:00
|
|
|
/*
|
2016-06-03 00:25:58 +00:00
|
|
|
Copyright 2015 The Kubernetes Authors.
|
2015-08-27 17:18:06 +00:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package e2e
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-09-24 12:56:28 +00:00
|
|
|
"reflect"
|
|
|
|
"strings"
|
2015-08-27 17:18:06 +00:00
|
|
|
"time"
|
|
|
|
|
2017-01-13 17:48:50 +00:00
|
|
|
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
2017-01-11 14:09:48 +00:00
|
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
"k8s.io/apimachinery/pkg/labels"
|
|
|
|
"k8s.io/apimachinery/pkg/runtime"
|
2017-03-10 23:23:40 +00:00
|
|
|
"k8s.io/apimachinery/pkg/types"
|
2017-01-11 14:09:48 +00:00
|
|
|
"k8s.io/apimachinery/pkg/util/wait"
|
2015-08-27 17:18:06 +00:00
|
|
|
"k8s.io/kubernetes/pkg/api"
|
2016-11-18 20:55:17 +00:00
|
|
|
"k8s.io/kubernetes/pkg/api/v1"
|
2017-04-17 17:56:40 +00:00
|
|
|
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
2016-11-18 20:55:17 +00:00
|
|
|
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
|
|
|
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
2017-01-10 08:49:34 +00:00
|
|
|
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
2017-01-23 23:17:26 +00:00
|
|
|
"k8s.io/kubernetes/pkg/controller"
|
2017-03-02 00:04:54 +00:00
|
|
|
"k8s.io/kubernetes/pkg/controller/daemon"
|
2015-08-27 17:18:21 +00:00
|
|
|
"k8s.io/kubernetes/pkg/kubectl"
|
2017-03-02 00:04:54 +00:00
|
|
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
2016-04-07 17:21:31 +00:00
|
|
|
"k8s.io/kubernetes/test/e2e/framework"
|
2015-08-27 17:18:06 +00:00
|
|
|
|
|
|
|
. "github.com/onsi/ginkgo"
|
|
|
|
. "github.com/onsi/gomega"
|
|
|
|
)
|
|
|
|
|
2015-09-17 20:42:17 +00:00
|
|
|
const (
	// Poll interval and timeout used by the DaemonSet tests when waiting for
	// pods to appear/disappear on nodes.
	// this should not be a multiple of 5, because node status updates
	// every 5 seconds. See https://github.com/kubernetes/kubernetes/pull/14915.
	dsRetryPeriod  = 1 * time.Second
	dsRetryTimeout = 5 * time.Minute

	// Label keys applied by these tests to pods and nodes. The shared prefix
	// lets separateDaemonSetNodeLabels distinguish test-owned node labels
	// from labels owned by anything else.
	daemonsetLabelPrefix = "daemonset-"
	daemonsetNameLabel   = daemonsetLabelPrefix + "name"
	daemonsetColorLabel  = daemonsetLabelPrefix + "color"
)
|
|
|
|
|
2016-02-29 22:45:04 +00:00
|
|
|
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
// model which lets the DS controller kick out other pods to make room.
// See http://issues.k8s.io/21767 for more details
var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
	var f *framework.Framework

	AfterEach(func() {
		// Clean up: reap every DaemonSet the test created, then dump remaining
		// state for debugging, then strip test-owned labels from nodes.
		daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
		if daemonsets != nil && len(daemonsets.Items) > 0 {
			for _, ds := range daemonsets.Items {
				By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name))
				dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
				Expect(err).NotTo(HaveOccurred())
				err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil)
				Expect(err).NotTo(HaveOccurred())
				// Wait until the reaped DaemonSet's pods are gone from all nodes.
				err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
				Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
			}
		}
		// Best-effort debug dump of any DaemonSets and pods still present.
		if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
			framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), daemonsets))
		} else {
			framework.Logf("unable to dump daemonsets: %v", err)
		}
		if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
			framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), pods))
		} else {
			framework.Logf("unable to dump pods: %v", err)
		}
		// Remove any node labels this test added so later tests start clean.
		err = clearDaemonSetNodeLabels(f.ClientSet)
		Expect(err).NotTo(HaveOccurred())
	})

	f = framework.NewDefaultFramework("daemonsets")

	image := framework.ServeHostnameImage
	dsName := "daemon-set"

	var ns string
	var c clientset.Interface

	BeforeEach(func() {
		ns = f.Namespace.Name

		c = f.ClientSet
		// Start from a clean slate: no test-owned labels on any node.
		err := clearDaemonSetNodeLabels(c)
		Expect(err).NotTo(HaveOccurred())
	})

	// Basic lifecycle: a plain DaemonSet lands one pod per node, and a deleted
	// daemon pod is recreated by the controller.
	It("should run and stop simple daemon", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
		ds, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
		Expect(err).NotTo(HaveOccurred())

		By("Check that daemon pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
		err = checkDaemonStatus(f, dsName)
		Expect(err).NotTo(HaveOccurred())

		By("Stop a daemon pod, check that the daemon pod is revived.")
		podList := listDaemonPods(c, ns, label)
		pod := podList.Items[0]
		err = c.Core().Pods(ns).Delete(pod.Name, nil)
		Expect(err).NotTo(HaveOccurred())
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
	})

	// Node-selector behavior: pods run only on nodes whose labels match, and
	// follow label changes (blue -> green) as well as selector patches.
	It("should run and stop complex daemon", func() {
		complexLabel := map[string]string{daemonsetNameLabel: dsName}
		nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
		framework.Logf("Creating daemon %q with a node selector", dsName)
		ds := newDaemonSet(dsName, image, complexLabel)
		ds.Spec.Template.Spec.NodeSelector = nodeSelector
		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
		Expect(err).NotTo(HaveOccurred())

		By("Initially, daemon pods should not be running on any nodes.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

		By("Change node label to blue, check that daemon pod is launched.")
		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
		Expect(len(daemonSetLabels)).To(Equal(1))
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
		err = checkDaemonStatus(f, dsName)
		Expect(err).NotTo(HaveOccurred())

		By("Update the node label to green, and wait for daemons to be unscheduled")
		nodeSelector[daemonsetColorLabel] = "green"
		greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
		Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
		Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
			NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

		By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
		patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
			daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
		ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
		Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
		daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
		Expect(len(daemonSetLabels)).To(Equal(1))
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
		err = checkDaemonStatus(f, dsName)
		Expect(err).NotTo(HaveOccurred())
	})

	// Same scheduling contract as the node-selector test, but expressed with
	// RequiredDuringSchedulingIgnoredDuringExecution node affinity.
	It("should run and stop complex daemon with node affinity", func() {
		complexLabel := map[string]string{daemonsetNameLabel: dsName}
		nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
		framework.Logf("Creating daemon %q with a node affinity", dsName)
		ds := newDaemonSet(dsName, image, complexLabel)
		ds.Spec.Template.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      daemonsetColorLabel,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{nodeSelector[daemonsetColorLabel]},
								},
							},
						},
					},
				},
			},
		}
		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
		Expect(err).NotTo(HaveOccurred())

		By("Initially, daemon pods should not be running on any nodes.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

		By("Change node label to blue, check that daemon pod is launched.")
		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
		newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
		Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
		daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
		Expect(len(daemonSetLabels)).To(Equal(1))
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
		err = checkDaemonStatus(f, dsName)
		Expect(err).NotTo(HaveOccurred())

		By("Remove the node label and wait for daemons to be unscheduled")
		_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
		Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
		Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
			NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
	})

	// The controller must replace a daemon pod whose status is forced to Failed.
	It("should retry creating failed daemon pods", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
		ds, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
		Expect(err).NotTo(HaveOccurred())

		By("Check that daemon pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
		err = checkDaemonStatus(f, dsName)
		Expect(err).NotTo(HaveOccurred())

		By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
		podList := listDaemonPods(c, ns, label)
		pod := podList.Items[0]
		// Clear the resource version so the status update is unconditional.
		pod.ResourceVersion = ""
		pod.Status.Phase = v1.PodFailed
		_, err = c.Core().Pods(ns).UpdateStatus(&pod)
		Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
	})

	// With the default OnDelete strategy, patching the template must NOT roll
	// existing pods: images and template generation stay at their old values.
	It("Should not update pod when spec was updated and update strategy is OnDelete", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		framework.Logf("Creating simple daemon set %s", dsName)
		ds, err := c.Extensions().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
		Expect(err).NotTo(HaveOccurred())
		Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))

		By("Check that daemon pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		By("Make sure all daemon pods have correct template generation 1")
		err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
		Expect(err).NotTo(HaveOccurred())

		By("Update daemon pods image.")
		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, redisImage)
		ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
		Expect(err).NotTo(HaveOccurred())
		Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))

		By("Check that daemon pods images aren't updated.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
		Expect(err).NotTo(HaveOccurred())

		By("Make sure all daemon pods have correct template generation 1")
		err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
		Expect(err).NotTo(HaveOccurred())

		By("Check that daemon pods are still running on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
	})

	// With RollingUpdate, patching the template must roll pods to the new
	// image and bump their template generation.
	It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		// Non-default generation verifies the patch increments rather than resets it.
		templateGeneration := int64(999)
		framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
		ds := newDaemonSet(dsName, image, label)
		ds.Spec.TemplateGeneration = templateGeneration
		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
		Expect(err).NotTo(HaveOccurred())
		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))

		By("Check that daemon pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
		Expect(err).NotTo(HaveOccurred())

		By("Update daemon pods image.")
		patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, redisImage)
		ds, err = c.Extensions().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
		Expect(err).NotTo(HaveOccurred())
		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration + 1))

		By("Check that daemon pods images are updated.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, redisImage, 1))
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration+1))
		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration+1))
		Expect(err).NotTo(HaveOccurred())

		By("Check that daemon pods are still running on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
	})

	// Adoption semantics: a new DaemonSet with a matching templateGeneration
	// adopts orphaned pods in place; a mismatching one kills and recreates them.
	It("Should adopt or recreate existing pods when creating a RollingUpdate DaemonSet with matching or mismatching templateGeneration", func() {
		label := map[string]string{daemonsetNameLabel: dsName}

		// 1. Create a RollingUpdate DaemonSet
		templateGeneration := int64(999)
		framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
		ds := newDaemonSet(dsName, image, label)
		ds.Spec.TemplateGeneration = templateGeneration
		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
		ds, err := c.Extensions().DaemonSets(ns).Create(ds)
		Expect(err).NotTo(HaveOccurred())
		Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))

		By("Check that daemon pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
		Expect(err).NotTo(HaveOccurred())

		// 2. Orphan DaemonSet pods
		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", dsName))
		err = orphanDaemonSetPods(c, ds)
		Expect(err).NotTo(HaveOccurred())
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ns, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(f, ns, ds.Name))
		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")

		// 3. Adopt DaemonSet pods (no restart)
		newDSName := "adopt"
		By(fmt.Sprintf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName))
		newDS := newDaemonSet(newDSName, image, label)
		newDS.Spec.TemplateGeneration = templateGeneration
		newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
		newDS, err = c.Extensions().DaemonSets(ns).Create(newDS)
		Expect(err).NotTo(HaveOccurred())
		Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))

		By(fmt.Sprintf("Wait for all pods to be adopted by DaemonSet %s", newDSName))
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, newDS.UID, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")

		By(fmt.Sprintf("Make sure no daemon pod updated its template generation %d", templateGeneration))
		err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
		Expect(err).NotTo(HaveOccurred())

		By("Make sure no pods are recreated by looking at their names")
		err = checkDaemonSetPodsName(c, ns, dsName, label)
		Expect(err).NotTo(HaveOccurred())

		// 4. Orphan DaemonSet pods again
		By(fmt.Sprintf("Deleting DaemonSet %s and orphaning its pods", newDSName))
		err = orphanDaemonSetPods(c, newDS)
		Expect(err).NotTo(HaveOccurred())
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ns, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(f, ns, newDSName))
		Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")

		// 4. Adopt DaemonSet pods (should kill and restart those pods)
		// This DS keeps the default templateGeneration (1), which mismatches the
		// orphaned pods' generation (999), so adoption recreates them.
		newRestartDSName := "restart"
		By(fmt.Sprintf("Creating a new RollingUpdate DaemonSet %s to restart adopted pods", newRestartDSName))
		newRestartDS := newDaemonSet(newRestartDSName, image, label)
		newRestartDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
		newRestartDS, err = c.Extensions().DaemonSets(ns).Create(newRestartDS)
		Expect(err).NotTo(HaveOccurred())
		Expect(newRestartDS.Spec.TemplateGeneration).To(Equal(int64(1)))

		By("Wait for restarted DaemonSet pods launch on every node of the cluster.")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsNameMatch(c, ns, newRestartDSName, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to restart")

		By("Make sure restarted DaemonSet pods have correct template generation 1")
		err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
		Expect(err).NotTo(HaveOccurred())
	})
})
|
|
|
|
|
2017-03-17 18:20:53 +00:00
|
|
|
// getDaemonSetImagePatch builds the strategic-merge-patch JSON fragment that
// swaps the image of the named container in a DaemonSet's pod template.
func getDaemonSetImagePatch(containerName, containerImage string) string {
	const patchTemplate = `{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`
	return fmt.Sprintf(patchTemplate, containerName, containerImage)
}
|
|
|
|
|
2017-03-10 23:23:40 +00:00
|
|
|
func orphanDaemonSetPods(c clientset.Interface, ds *extensions.DaemonSet) error {
|
|
|
|
trueVar := true
|
|
|
|
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
|
|
|
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
|
|
|
|
return c.Extensions().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
|
|
|
|
}
|
|
|
|
|
2017-01-23 23:17:26 +00:00
|
|
|
func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
|
|
|
|
return &extensions.DaemonSet{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: dsName,
|
|
|
|
},
|
|
|
|
Spec: extensions.DaemonSetSpec{
|
|
|
|
Template: v1.PodTemplateSpec{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Labels: label,
|
|
|
|
},
|
|
|
|
Spec: v1.PodSpec{
|
|
|
|
Containers: []v1.Container{
|
|
|
|
{
|
|
|
|
Name: dsName,
|
|
|
|
Image: image,
|
|
|
|
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
|
|
|
|
selector := labels.Set(label).AsSelector()
|
|
|
|
options := metav1.ListOptions{LabelSelector: selector.String()}
|
|
|
|
podList, err := c.Core().Pods(ns).List(options)
|
|
|
|
Expect(err).NotTo(HaveOccurred())
|
|
|
|
Expect(len(podList.Items)).To(BeNumerically(">", 0))
|
|
|
|
return podList
|
|
|
|
}
|
|
|
|
|
2015-09-24 12:56:28 +00:00
|
|
|
func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, map[string]string) {
|
|
|
|
daemonSetLabels := map[string]string{}
|
|
|
|
otherLabels := map[string]string{}
|
|
|
|
for k, v := range labels {
|
|
|
|
if strings.HasPrefix(k, daemonsetLabelPrefix) {
|
|
|
|
daemonSetLabels[k] = v
|
|
|
|
} else {
|
|
|
|
otherLabels[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return daemonSetLabels, otherLabels
|
|
|
|
}
|
|
|
|
|
2016-10-18 13:00:38 +00:00
|
|
|
func clearDaemonSetNodeLabels(c clientset.Interface) error {
|
|
|
|
nodeList := framework.GetReadySchedulableNodesOrDie(c)
|
2015-08-27 17:18:06 +00:00
|
|
|
for _, node := range nodeList.Items {
|
2015-09-24 12:56:28 +00:00
|
|
|
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2015-08-27 17:18:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-11-18 20:55:17 +00:00
|
|
|
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
|
2016-10-18 13:00:38 +00:00
|
|
|
nodeClient := c.Core().Nodes()
|
2016-11-18 20:55:17 +00:00
|
|
|
var newNode *v1.Node
|
2015-09-24 12:56:28 +00:00
|
|
|
var newLabels map[string]string
|
2017-03-23 00:25:52 +00:00
|
|
|
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
|
2016-12-07 14:40:26 +00:00
|
|
|
node, err := nodeClient.Get(nodeName, metav1.GetOptions{})
|
2015-09-24 12:56:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// remove all labels this test is creating
|
|
|
|
daemonSetLabels, otherLabels := separateDaemonSetNodeLabels(node.Labels)
|
|
|
|
if reflect.DeepEqual(daemonSetLabels, labels) {
|
|
|
|
newNode = node
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
node.Labels = otherLabels
|
|
|
|
for k, v := range labels {
|
|
|
|
node.Labels[k] = v
|
|
|
|
}
|
|
|
|
newNode, err = nodeClient.Update(node)
|
|
|
|
if err == nil {
|
|
|
|
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
|
|
|
|
return true, err
|
|
|
|
}
|
2016-12-03 18:57:26 +00:00
|
|
|
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
2016-04-07 17:21:31 +00:00
|
|
|
framework.Logf("failed to update node due to resource version conflict")
|
2015-09-24 12:56:28 +00:00
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return false, err
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
} else if len(newLabels) != len(labels) {
|
|
|
|
return nil, fmt.Errorf("Could not set daemon set test labels as expected.")
|
|
|
|
}
|
|
|
|
|
|
|
|
return newNode, nil
|
|
|
|
}
|
|
|
|
|
2017-03-23 00:25:52 +00:00
|
|
|
func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
|
2015-08-27 17:18:06 +00:00
|
|
|
return func() (bool, error) {
|
2017-03-23 00:25:52 +00:00
|
|
|
podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
2015-08-27 17:18:06 +00:00
|
|
|
if err != nil {
|
2017-03-23 00:25:52 +00:00
|
|
|
framework.Logf("could not get the pod list: %v", err)
|
2015-08-27 17:18:06 +00:00
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
pods := podList.Items
|
|
|
|
|
|
|
|
nodesToPodCount := make(map[string]int)
|
|
|
|
for _, pod := range pods {
|
2017-03-23 00:25:52 +00:00
|
|
|
if controllerRef := controller.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != ds.UID {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if pod.DeletionTimestamp != nil {
|
|
|
|
continue
|
|
|
|
}
|
2017-04-17 17:56:40 +00:00
|
|
|
if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
2017-01-23 23:17:26 +00:00
|
|
|
nodesToPodCount[pod.Spec.NodeName] += 1
|
|
|
|
}
|
2015-08-27 17:18:06 +00:00
|
|
|
}
|
2017-03-23 00:25:52 +00:00
|
|
|
framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
|
2015-08-27 17:18:06 +00:00
|
|
|
|
|
|
|
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
|
|
|
|
for _, nodeName := range nodeNames {
|
|
|
|
if nodesToPodCount[nodeName] != 1 {
|
2017-03-23 00:25:52 +00:00
|
|
|
framework.Logf("Node %s is running more than one daemon pod", nodeName)
|
2015-08-27 17:18:06 +00:00
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-23 00:25:52 +00:00
|
|
|
framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
|
2015-08-27 17:18:06 +00:00
|
|
|
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
|
|
|
|
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
|
|
|
|
// other nodes.
|
|
|
|
return len(nodesToPodCount) == len(nodeNames), nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-23 00:25:52 +00:00
|
|
|
func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
|
2015-08-27 17:18:06 +00:00
|
|
|
return func() (bool, error) {
|
2017-01-22 03:36:02 +00:00
|
|
|
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
|
2016-04-07 17:21:31 +00:00
|
|
|
framework.ExpectNoError(err)
|
2015-08-27 17:18:06 +00:00
|
|
|
nodeNames := make([]string, 0)
|
|
|
|
for _, node := range nodeList.Items {
|
2017-03-02 00:04:54 +00:00
|
|
|
if !canScheduleOnNode(node, ds) {
|
2017-03-10 23:23:40 +00:00
|
|
|
framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
|
2017-03-02 00:04:54 +00:00
|
|
|
continue
|
|
|
|
}
|
2015-08-27 17:18:06 +00:00
|
|
|
nodeNames = append(nodeNames, node.Name)
|
|
|
|
}
|
2017-03-23 00:25:52 +00:00
|
|
|
return checkDaemonPodOnNodes(f, ds, nodeNames)()
|
2015-08-27 17:18:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-02 00:04:54 +00:00
|
|
|
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
|
|
|
|
func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
|
|
|
|
newPod := daemon.NewPod(ds, node.Name)
|
|
|
|
nodeInfo := schedulercache.NewNodeInfo()
|
|
|
|
nodeInfo.SetNode(&node)
|
|
|
|
fit, _, err := daemon.Predicates(newPod, nodeInfo)
|
|
|
|
if err != nil {
|
|
|
|
framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return fit
|
|
|
|
}
|
|
|
|
|
2017-03-23 00:25:52 +00:00
|
|
|
func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
|
|
|
|
return checkDaemonPodOnNodes(f, ds, make([]string, 0))
|
2015-08-27 17:18:06 +00:00
|
|
|
}
|
2016-10-05 07:16:41 +00:00
|
|
|
|
|
|
|
func checkDaemonStatus(f *framework.Framework, dsName string) error {
|
2016-12-07 14:40:26 +00:00
|
|
|
ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
|
2016-10-05 07:16:41 +00:00
|
|
|
if err != nil {
|
2016-11-18 20:55:17 +00:00
|
|
|
return fmt.Errorf("Could not get daemon set from v1.")
|
2016-10-05 07:16:41 +00:00
|
|
|
}
|
|
|
|
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
|
|
|
|
if desired != scheduled && desired != ready {
|
|
|
|
return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2017-02-16 10:18:16 +00:00
|
|
|
|
2017-03-23 00:25:52 +00:00
|
|
|
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
|
2017-02-16 10:18:16 +00:00
|
|
|
return func() (bool, error) {
|
2017-03-23 00:25:52 +00:00
|
|
|
podList, err := c.Core().Pods(ds.Namespace).List(metav1.ListOptions{})
|
2017-02-16 10:18:16 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
pods := podList.Items
|
|
|
|
|
2017-03-23 00:25:52 +00:00
|
|
|
unavailablePods := 0
|
|
|
|
allImagesUpdated := true
|
2017-02-16 10:18:16 +00:00
|
|
|
for _, pod := range pods {
|
2017-03-23 00:25:52 +00:00
|
|
|
if controllerRef := controller.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != ds.UID {
|
|
|
|
continue
|
|
|
|
}
|
2017-02-16 10:18:16 +00:00
|
|
|
podImage := pod.Spec.Containers[0].Image
|
2017-03-23 00:25:52 +00:00
|
|
|
if podImage != image {
|
|
|
|
allImagesUpdated = false
|
|
|
|
framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
|
|
|
|
}
|
2017-04-17 17:56:40 +00:00
|
|
|
if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
2017-03-23 00:25:52 +00:00
|
|
|
framework.Logf("Pod %s is not available", pod.Name)
|
|
|
|
unavailablePods++
|
2017-02-16 10:18:16 +00:00
|
|
|
}
|
|
|
|
}
|
2017-03-23 00:25:52 +00:00
|
|
|
if unavailablePods > maxUnavailable {
|
|
|
|
return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
|
|
|
|
}
|
|
|
|
if !allImagesUpdated {
|
|
|
|
return false, nil
|
|
|
|
}
|
2017-02-16 10:18:16 +00:00
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
}
|
2017-02-28 01:52:24 +00:00
|
|
|
|
|
|
|
func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
|
|
|
|
pods := listDaemonPods(c, ns, label)
|
|
|
|
for _, pod := range pods.Items {
|
2017-03-10 23:23:40 +00:00
|
|
|
// We don't care about inactive pods
|
|
|
|
if !controller.IsPodActive(&pod) {
|
|
|
|
continue
|
|
|
|
}
|
2017-02-28 01:52:24 +00:00
|
|
|
podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
|
|
|
|
if podTemplateGeneration != templateGeneration {
|
2017-03-10 23:23:40 +00:00
|
|
|
return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
|
2017-02-28 01:52:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func checkDaemonSetDeleted(f *framework.Framework, ns, name string) func() (bool, error) {
|
|
|
|
return func() (bool, error) {
|
|
|
|
_, err := f.ClientSet.Extensions().DaemonSets(ns).Get(name, metav1.GetOptions{})
|
|
|
|
if !apierrs.IsNotFound(err) {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-10 23:23:40 +00:00
|
|
|
func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
|
|
|
return func() (bool, error) {
|
|
|
|
pods := listDaemonPods(c, ns, label)
|
|
|
|
for _, pod := range pods.Items {
|
|
|
|
// This pod is orphaned only when controller ref is cleared
|
|
|
|
if controllerRef := controller.GetControllerOf(&pod); controllerRef != nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
2017-02-28 01:52:24 +00:00
|
|
|
}
|
|
|
|
|
2017-03-10 23:23:40 +00:00
|
|
|
func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
|
|
|
|
return func() (bool, error) {
|
|
|
|
pods := listDaemonPods(c, ns, label)
|
|
|
|
for _, pod := range pods.Items {
|
|
|
|
// This pod is adopted only when its controller ref is update
|
|
|
|
if controllerRef := controller.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
2017-02-28 01:52:24 +00:00
|
|
|
}
|
|
|
|
|
2017-03-10 23:23:40 +00:00
|
|
|
func checkDaemonSetPodsNameMatch(c clientset.Interface, ns, prefix string, label map[string]string) func() (bool, error) {
|
|
|
|
return func() (bool, error) {
|
|
|
|
if err := checkDaemonSetPodsName(c, ns, prefix, label); err != nil {
|
|
|
|
framework.Logf("%v", err)
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return true, nil
|
2017-02-28 01:52:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-10 23:23:40 +00:00
|
|
|
func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
|
|
|
|
pods := listDaemonPods(c, ns, label)
|
|
|
|
for _, pod := range pods.Items {
|
|
|
|
if !strings.HasPrefix(pod.Name, prefix) {
|
|
|
|
return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
|
|
|
|
}
|
2017-02-28 01:52:24 +00:00
|
|
|
}
|
2017-03-10 23:23:40 +00:00
|
|
|
return nil
|
2017-02-28 01:52:24 +00:00
|
|
|
}
|