Merge pull request #60301 from tnozicka/fix-recreate

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix Deployment with Recreate strategy not to wait on Pods in terminal phase

**What this PR does / why we need it**:
Fixes Deployments with the Recreate strategy so they no longer wait on Pods in a terminal phase. Such Pods can be left behind after an eviction, or after a Pod stops matching the selector, and the ReplicaSet currently leaves them around. (Hopefully RC gets fixed separately.)
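
To make the failure mode concrete, here is a minimal sketch (using a stand-in `Pod` type rather than the real `k8s.io/api/core/v1` types) of the check this PR replaces:

```go
package main

import "fmt"

// Pod is a stand-in for v1.Pod; Phase mirrors the v1.PodPhase string values.
type Pod struct {
	Phase string
}

func main() {
	// After an eviction, the old ReplicaSet can still own a pod that sits
	// in the terminal "Failed" phase and will never run again.
	oldPods := []Pod{{Phase: "Failed"}}

	// The pre-fix check: any pod at all, terminal or not, keeps the
	// Recreate rollout waiting.
	blocked := len(oldPods) > 0
	fmt.Println("rollout blocked before the fix:", blocked) // true, indefinitely
}
```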

**Which issue(s) this PR fixes**:
Fixes https://github.com/kubernetes/kubernetes/issues/60162

**Special notes for your reviewer**:

**Release note**:

```release-note
Fixes a case where a Deployment with the Recreate strategy could get stuck on an old failed Pod.
```

/cc @janetkuo
commit e491689ef9 by Kubernetes Submit Queue, 2018-02-26 15:00:49 -08:00 (committed by GitHub)
2 changed files with 129 additions and 16 deletions


```diff
@@ -104,8 +104,19 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
 		if newRS != nil && newRS.UID == rsUID {
 			continue
 		}
-		if len(podList.Items) > 0 {
-			return true
+		for _, pod := range podList.Items {
+			switch pod.Status.Phase {
+			case v1.PodFailed, v1.PodSucceeded:
+				// Don't count pods in terminal state.
+				continue
+			case v1.PodUnknown:
+				// This happens in situations such as when the node is temporarily disconnected from the cluster.
+				// If we can't be sure that the pod is not running, we have to count it.
+				return true
+			default:
+				// Pod is not in terminal phase.
+				return true
+			}
 		}
 	}
 	return false
```
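
The per-pod rule above can be exercised on its own; here is a self-contained sketch of the same switch, using a stand-in `PodPhase` type instead of the real `v1.PodPhase` constants:

```go
package main

import "fmt"

// PodPhase is a stand-in for v1.PodPhase.
type PodPhase string

const (
	PodPending   PodPhase = "Pending"
	PodRunning   PodPhase = "Running"
	PodSucceeded PodPhase = "Succeeded"
	PodFailed    PodPhase = "Failed"
	PodUnknown   PodPhase = "Unknown"
)

// blocksRecreate mirrors the switch in the patch: terminal pods (Failed,
// Succeeded) no longer hold up the rollout; Unknown is counted conservatively
// because the pod may still be running on a disconnected node.
func blocksRecreate(phase PodPhase) bool {
	switch phase {
	case PodFailed, PodSucceeded:
		return false
	default:
		return true
	}
}

func main() {
	for _, p := range []PodPhase{PodFailed, PodSucceeded, PodUnknown, PodPending, PodRunning} {
		fmt.Printf("%-9s blocks rollout: %v\n", p, blocksRecreate(p))
	}
}
```

Note the deliberate asymmetry: `PodUnknown` is treated like a running pod, so the controller errs on the side of waiting rather than starting new pods alongside possibly-live old ones.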


```diff
@@ -90,33 +90,135 @@ func TestOldPodsRunning(t *testing.T) {
 		oldRSs []*extensions.ReplicaSet
 		podMap map[types.UID]*v1.PodList
-		expected bool
+		hasOldPodsRunning bool
 	}{
 		{
-			name:     "no old RSs",
-			expected: false,
+			name:              "no old RSs",
+			hasOldPodsRunning: false,
 		},
 		{
-			name:     "old RSs with running pods",
-			oldRSs:   []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
-			podMap:   podMapWithUIDs([]string{"some-uid", "other-uid"}),
-			expected: true,
+			name:              "old RSs with running pods",
+			oldRSs:            []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
+			podMap:            podMapWithUIDs([]string{"some-uid", "other-uid"}),
+			hasOldPodsRunning: true,
 		},
 		{
-			name:     "old RSs without pods but with non-zero status replicas",
-			oldRSs:   []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 1, nil)},
-			expected: true,
+			name:              "old RSs without pods but with non-zero status replicas",
+			oldRSs:            []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
+			hasOldPodsRunning: true,
 		},
 		{
-			name:     "old RSs without pods or non-zero status replicas",
-			oldRSs:   []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 0, nil)},
-			expected: false,
+			name:              "old RSs without pods or non-zero status replicas",
+			oldRSs:            []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			hasOldPodsRunning: false,
 		},
+		{
+			name:   "old RSs with zero status replicas but pods in terminal state are present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodFailed,
+							},
+						},
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodSucceeded,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: false,
+		},
+		{
+			name:   "old RSs with zero status replicas but pod in unknown phase present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodUnknown,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas with pending pod present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodPending,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas with running pod present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodRunning,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
+		{
+			name:   "old RSs with zero status replicas but pods in terminal state and pending are present",
+			oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
+			podMap: map[types.UID]*v1.PodList{
+				"uid-1": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodFailed,
+							},
+						},
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodSucceeded,
+							},
+						},
+					},
+				},
+				"uid-2": {
+					Items: []v1.Pod{},
+				},
+				"uid-3": {
+					Items: []v1.Pod{
+						{
+							Status: v1.PodStatus{
+								Phase: v1.PodPending,
+							},
+						},
+					},
+				},
+			},
+			hasOldPodsRunning: true,
+		},
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
+			if expected, got := test.hasOldPodsRunning, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
 				t.Errorf("%s: expected %t, got %t", test.name, expected, got)
 			}
 		})
```