mirror of https://github.com/k3s-io/k3s
Pods marked for deletion should be counted as unavailable
parent fab2f5e3af
commit 0f75e56722
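The core idea of this change: a daemon pod whose DeletionTimestamp is set is terminating, so the controller should stop counting it as available on its node. Below is a minimal, self-contained sketch of that counting rule; the pared-down Pod type and countUnavailable helper are hypothetical stand-ins for illustration, not the controller's actual code.

package main

import (
	"fmt"
	"time"
)

// Pod is a hypothetical stand-in for v1.Pod, reduced to the two
// fields the availability rule below cares about.
type Pod struct {
	Ready             bool
	DeletionTimestamp *time.Time
}

// countUnavailable counts nodes that have no daemon pod which is both
// ready and not marked for deletion.
func countUnavailable(nodeToPods map[string][]*Pod) int {
	unavailable := 0
	for _, pods := range nodeToPods {
		available := false
		for _, pod := range pods {
			// A non-nil DeletionTimestamp means the pod is terminating,
			// so it no longer counts toward availability.
			if pod.DeletionTimestamp == nil && pod.Ready {
				available = true
				break
			}
		}
		if !available {
			unavailable++
		}
	}
	return unavailable
}

func main() {
	now := time.Now()
	nodes := map[string][]*Pod{
		"node-0": {{Ready: true}},                          // healthy daemon pod
		"node-1": {{Ready: true, DeletionTimestamp: &now}}, // terminating
	}
	fmt.Println(countUnavailable(nodes)) // prints 1: node-1 is unavailable
}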
@@ -492,6 +492,10 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
 	// Group Pods by Node name.
 	nodeToDaemonPods := make(map[string][]*v1.Pod)
 	for _, pod := range claimedPods {
+		// Skip terminating pods
+		if pod.DeletionTimestamp != nil {
+			continue
+		}
 		nodeName := pod.Spec.NodeName
 		nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod)
 	}
@@ -553,10 +557,6 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 		var daemonPodsRunning []*v1.Pod
 		for i := range daemonPods {
 			pod := daemonPods[i]
-			// Skip terminating pods. We won't delete them again or count them as running daemon pods.
-			if pod.DeletionTimestamp != nil {
-				continue
-			}
 			if pod.Status.Phase == v1.PodFailed {
 				msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, node.Name, pod.Name)
 				glog.V(2).Infof(msg)
@@ -333,11 +333,15 @@ func markPodsReady(store cache.Store) {
 	// mark pods as ready
 	for _, obj := range store.List() {
 		pod := obj.(*v1.Pod)
-		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
-		podutil.UpdatePodCondition(&pod.Status, &condition)
+		markPodReady(pod)
 	}
 }
 
+func markPodReady(pod *v1.Pod) {
+	condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
+	podutil.UpdatePodCondition(&pod.Status, &condition)
+}
+
 // DaemonSets without node selectors should launch pods on every node.
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 	ds := newDaemonSet("foo")
@@ -1263,6 +1267,12 @@ func TestGetNodesToDaemonPods(t *testing.T) {
 		newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
 		newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
 		newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
+		func() *v1.Pod {
+			pod := newPod("matching-owned-2-but-set-for-deletion", "node-2", simpleDaemonSetLabel, ds)
+			now := metav1.Now()
+			pod.DeletionTimestamp = &now
+			return pod
+		}(),
 	}
 	for _, pod := range ignoredPods {
 		manager.podStore.Add(pod)
@@ -126,5 +126,6 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet,
 	if err != nil {
 		return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
 	}
+	glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
 	return maxUnavailable, numUnavailable, nil
 }
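For the percentage form of MaxUnavailable exercised in the new test further below, getUnavailableNumbers resolves the percent against the node count. A small sketch of that resolution using the real intstr helper; whether the controller rounds up or down is not visible in this diff, so the roundUp argument is an assumption (for 50% of 2 nodes both choices yield 1):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("50%")
	// roundUp=true is an assumption about the controller's behavior;
	// with 50% of 2 nodes the result is 1 either way.
	v, err := intstr.GetValueFromIntOrPercent(&maxUnavailable, 2, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 50% of 2 nodes -> 1
}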
@@ -20,6 +20,7 @@ import (
 	"testing"
 
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 )
 
@@ -133,3 +134,121 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
 	clearExpectations(t, manager, ds, podControl)
 }
+
+func TestGetUnavailableNumbers(t *testing.T) {
+	cases := []struct {
+		name           string
+		Manager        *daemonSetsController
+		ds             *extensions.DaemonSet
+		nodeToPods     map[string][]*v1.Pod
+		maxUnavailable int
+		numUnavailable int
+		Err            error
+	}{
+		{
+			name: "No nodes",
+			Manager: func() *daemonSetsController {
+				manager, _, _ := newTestController()
+				return manager
+			}(),
+			ds: func() *extensions.DaemonSet {
+				ds := newDaemonSet("x")
+				intStr := intstr.FromInt(0)
+				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				return ds
+			}(),
+			nodeToPods:     make(map[string][]*v1.Pod),
+			maxUnavailable: 0,
+			numUnavailable: 0,
+		},
+		{
+			name: "Two nodes with ready pods",
+			Manager: func() *daemonSetsController {
+				manager, _, _ := newTestController()
+				addNodes(manager.nodeStore, 0, 2, nil)
+				return manager
+			}(),
+			ds: func() *extensions.DaemonSet {
+				ds := newDaemonSet("x")
+				intStr := intstr.FromInt(1)
+				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				return ds
+			}(),
+			nodeToPods: func() map[string][]*v1.Pod {
+				mapping := make(map[string][]*v1.Pod)
+				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
+				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
+				markPodReady(pod0)
+				markPodReady(pod1)
+				mapping["node-0"] = []*v1.Pod{pod0}
+				mapping["node-1"] = []*v1.Pod{pod1}
+				return mapping
+			}(),
+			maxUnavailable: 1,
+			numUnavailable: 0,
+		},
+		{
+			name: "Two nodes, one node without pods",
+			Manager: func() *daemonSetsController {
+				manager, _, _ := newTestController()
+				addNodes(manager.nodeStore, 0, 2, nil)
+				return manager
+			}(),
+			ds: func() *extensions.DaemonSet {
+				ds := newDaemonSet("x")
+				intStr := intstr.FromInt(0)
+				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				return ds
+			}(),
+			nodeToPods: func() map[string][]*v1.Pod {
+				mapping := make(map[string][]*v1.Pod)
+				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
+				markPodReady(pod0)
+				mapping["node-0"] = []*v1.Pod{pod0}
+				return mapping
+			}(),
+			maxUnavailable: 0,
+			numUnavailable: 1,
+		},
+		{
+			name: "Two nodes with pods, MaxUnavailable in percents",
+			Manager: func() *daemonSetsController {
+				manager, _, _ := newTestController()
+				addNodes(manager.nodeStore, 0, 2, nil)
+				return manager
+			}(),
+			ds: func() *extensions.DaemonSet {
+				ds := newDaemonSet("x")
+				intStr := intstr.FromString("50%")
+				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+				return ds
+			}(),
+			nodeToPods: func() map[string][]*v1.Pod {
+				mapping := make(map[string][]*v1.Pod)
+				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
+				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
+				markPodReady(pod0)
+				markPodReady(pod1)
+				mapping["node-0"] = []*v1.Pod{pod0}
+				mapping["node-1"] = []*v1.Pod{pod1}
+				return mapping
+			}(),
+			maxUnavailable: 1,
+			numUnavailable: 0,
+		},
+	}
+
+	for _, c := range cases {
+		c.Manager.dsStore.Add(c.ds)
+		maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, c.nodeToPods)
+		if err != nil && c.Err != nil {
+			if c.Err != err {
+				t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err)
+			}
+		} else if err != nil {
+			t.Errorf("Test case: %s. Unexpected error: %v", c.name, err)
+		} else if maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable {
+			t.Errorf("Test case: %s. Wrong values. maxUnavailable: %d, expected: %d, numUnavailable: %d. expected: %d", c.name, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable)
+		}
+	}
+}
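The new table-driven test can be run on its own from a Kubernetes source checkout of this era (the package path pkg/controller/daemon is assumed):

go test ./pkg/controller/daemon/ -run TestGetUnavailableNumbers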