diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go index f51681246b..6054b15e38 100644 --- a/pkg/controller/daemon/daemoncontroller.go +++ b/pkg/controller/daemon/daemoncontroller.go @@ -492,6 +492,10 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) // Group Pods by Node name. nodeToDaemonPods := make(map[string][]*v1.Pod) for _, pod := range claimedPods { + // Skip terminating pods + if pod.DeletionTimestamp != nil { + continue + } nodeName := pod.Spec.NodeName nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod) } @@ -553,10 +557,6 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error { var daemonPodsRunning []*v1.Pod for i := range daemonPods { pod := daemonPods[i] - // Skip terminating pods. We won't delete them again or count them as running daemon pods. - if pod.DeletionTimestamp != nil { - continue - } if pod.Status.Phase == v1.PodFailed { msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, node.Name, pod.Name) glog.V(2).Infof(msg) diff --git a/pkg/controller/daemon/daemoncontroller_test.go b/pkg/controller/daemon/daemoncontroller_test.go index 9d1c94039e..d7ee241d44 100644 --- a/pkg/controller/daemon/daemoncontroller_test.go +++ b/pkg/controller/daemon/daemoncontroller_test.go @@ -333,11 +333,15 @@ func markPodsReady(store cache.Store) { // mark pods as ready for _, obj := range store.List() { pod := obj.(*v1.Pod) - condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} - podutil.UpdatePodCondition(&pod.Status, &condition) + markPodReady(pod) } } +func markPodReady(pod *v1.Pod) { + condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} + podutil.UpdatePodCondition(&pod.Status, &condition) +} + // DaemonSets without node selectors should launch pods on every node. 
func TestSimpleDaemonSetLaunchesPods(t *testing.T) { ds := newDaemonSet("foo") @@ -1263,6 +1267,12 @@ func TestGetNodesToDaemonPods(t *testing.T) { newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds), newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil), newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2), + func() *v1.Pod { + pod := newPod("matching-owned-2-but-set-for-deletion", "node-2", simpleDaemonSetLabel, ds) + now := metav1.Now() + pod.DeletionTimestamp = &now + return pod + }(), } for _, pod := range ignoredPods { manager.podStore.Add(pod) diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index c8322fae07..3327cad48c 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -126,5 +126,6 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, if err != nil { return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err) } + glog.V(4).Infof("DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable) return maxUnavailable, numUnavailable, nil } diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go index 529c2c81c3..9561f05d44 100644 --- a/pkg/controller/daemon/update_test.go +++ b/pkg/controller/daemon/update_test.go @@ -20,6 +20,7 @@ import ( "testing" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" ) @@ -133,3 +134,121 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) { syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) clearExpectations(t, manager, ds, podControl) } + +func TestGetUnavailableNumbers(t *testing.T) { + cases := []struct { + name string + Manager *daemonSetsController + ds *extensions.DaemonSet + nodeToPods map[string][]*v1.Pod + maxUnavailable int + numUnavailable int + Err error + }{ + { + name: "No 
nodes", + Manager: func() *daemonSetsController { + manager, _, _ := newTestController() + return manager + }(), + ds: func() *extensions.DaemonSet { + ds := newDaemonSet("x") + intStr := intstr.FromInt(0) + ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + return ds + }(), + nodeToPods: make(map[string][]*v1.Pod), + maxUnavailable: 0, + numUnavailable: 0, + }, + { + name: "Two nodes with ready pods", + Manager: func() *daemonSetsController { + manager, _, _ := newTestController() + addNodes(manager.nodeStore, 0, 2, nil) + return manager + }(), + ds: func() *extensions.DaemonSet { + ds := newDaemonSet("x") + intStr := intstr.FromInt(1) + ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + return ds + }(), + nodeToPods: func() map[string][]*v1.Pod { + mapping := make(map[string][]*v1.Pod) + pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil) + pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil) + markPodReady(pod0) + markPodReady(pod1) + mapping["node-0"] = []*v1.Pod{pod0} + mapping["node-1"] = []*v1.Pod{pod1} + return mapping + }(), + maxUnavailable: 1, + numUnavailable: 0, + }, + { + name: "Two nodes, one node without pods", + Manager: func() *daemonSetsController { + manager, _, _ := newTestController() + addNodes(manager.nodeStore, 0, 2, nil) + return manager + }(), + ds: func() *extensions.DaemonSet { + ds := newDaemonSet("x") + intStr := intstr.FromInt(0) + ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + return ds + }(), + nodeToPods: func() map[string][]*v1.Pod { + mapping := make(map[string][]*v1.Pod) + pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil) + markPodReady(pod0) + mapping["node-0"] = []*v1.Pod{pod0} + return mapping + }(), + maxUnavailable: 0, + numUnavailable: 1, + }, + { + name: "Two nodes with pods, MaxUnavailable in percents", + Manager: func() 
*daemonSetsController { + manager, _, _ := newTestController() + addNodes(manager.nodeStore, 0, 2, nil) + return manager + }(), + ds: func() *extensions.DaemonSet { + ds := newDaemonSet("x") + intStr := intstr.FromString("50%") + ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr} + return ds + }(), + nodeToPods: func() map[string][]*v1.Pod { + mapping := make(map[string][]*v1.Pod) + pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil) + pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil) + markPodReady(pod0) + markPodReady(pod1) + mapping["node-0"] = []*v1.Pod{pod0} + mapping["node-1"] = []*v1.Pod{pod1} + return mapping + }(), + maxUnavailable: 1, + numUnavailable: 0, + }, + } + + for _, c := range cases { + c.Manager.dsStore.Add(c.ds) + maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, c.nodeToPods) + if c.Err != nil { + if err == nil || c.Err.Error() != err.Error() { + t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err) + } + } else if err != nil { + t.Errorf("Test case: %s. Unexpected error: %v", c.name, err) + } else if maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable { + t.Errorf("Test case: %s. Wrong values. maxUnavailable: %d, expected: %d, numUnavailable: %d, expected: %d", c.name, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable) + } + } +}