diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go
index 1c0ddc1a6c..578f850c2d 100644
--- a/pkg/kubectl/cmd/drain.go
+++ b/pkg/kubectl/cmd/drain.go
@@ -403,8 +403,7 @@ func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fata
 func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) {
 	// Note that we return false in cases where the pod is DaemonSet managed,
-	// regardless of flags. We never delete them, the only question is whether
-	// their presence constitutes an error.
+	// regardless of flags.
 	//
 	// The exception is for pods that are orphaned (the referencing
 	// management resource - including DaemonSet - is not found).
@@ -413,12 +412,17 @@ func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal)
 	if controllerRef == nil || controllerRef.Kind != "DaemonSet" {
 		return true, nil, nil
 	}
+	// Any finished pod can be removed.
+	if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
+		return true, nil, nil
+	}
 	if _, err := o.client.ExtensionsV1beta1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil {
 		// remove orphaned pods with a warning if --force is used
 		if apierrors.IsNotFound(err) && o.Force {
 			return true, &warning{err.Error()}, nil
 		}
+
 		return false, nil, &fatal{err.Error()}
 	}
@@ -450,9 +454,14 @@ func (o *DrainOptions) localStorageFilter(pod corev1.Pod) (bool, *warning, *fata
 	if !hasLocalStorage(pod) {
 		return true, nil, nil
 	}
+	// Any finished pod can be removed.
+	if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
+		return true, nil, nil
+	}
 	if !o.DeleteLocalData {
 		return false, nil, &fatal{kLocalStorageFatal}
 	}
+
 	return true, &warning{kLocalStorageWarning}, nil
 }
diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go
index 0605d707ee..b8862715cf 100644
--- a/pkg/kubectl/cmd/drain_test.go
+++ b/pkg/kubectl/cmd/drain_test.go
@@ -308,6 +308,31 @@ func TestDrain(t *testing.T) {
 		},
 	}
+	ds_terminated_pod := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "bar",
+			Namespace: "default",
+			CreationTimestamp: metav1.Time{Time: time.Now()},
+			Labels: labels,
+			SelfLink: testapi.Default.SelfLink("pods", "bar"),
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: "extensions/v1beta1",
+					Kind: "DaemonSet",
+					Name: "ds",
+					BlockOwnerDeletion: boolptr(true),
+					Controller: boolptr(true),
+				},
+			},
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "node",
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+		},
+	}
+
 	ds_pod_with_emptyDir := corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "bar",
@@ -378,6 +403,46 @@ func TestDrain(t *testing.T) {
 				},
 			},
 		},
+		Spec: corev1.PodSpec{
+			NodeName: "node",
+			Volumes: []corev1.Volume{
+				{
+					Name: "scratch",
+					VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}},
+				},
+			},
+		},
+	}
+
+	terminated_job_pod_with_local_storage := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "bar",
+			Namespace: "default",
+			CreationTimestamp: metav1.Time{Time: time.Now()},
+			Labels: labels,
+			SelfLink: testapi.Default.SelfLink("pods", "bar"),
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: "v1",
+					Kind: "Job",
+					Name: "job",
+					BlockOwnerDeletion: boolptr(true),
+					Controller: boolptr(true),
+				},
+			},
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "node",
+			Volumes: []corev1.Volume{
+				{
+					Name: "scratch",
+					VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodSucceeded,
+		},
 	}
 	rs := extensions.ReplicaSet{
@@ -444,6 +509,26 @@ func TestDrain(t *testing.T) {
 			},
 		},
 	}
+	emptydir_terminated_pod := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "bar",
+			Namespace: "default",
+			CreationTimestamp: metav1.Time{Time: time.Now()},
+			Labels: labels,
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "node",
+			Volumes: []corev1.Volume{
+				{
+					Name: "scratch",
+					VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			Phase: corev1.PodFailed,
+		},
+	}
 	tests := []struct {
 		description string
@@ -477,6 +562,16 @@ func TestDrain(t *testing.T) {
 			expectFatal: true,
 			expectDelete: false,
 		},
+		{
+			description: "DS-managed terminated pod",
+			node: node,
+			expected: cordoned_node,
+			pods: []corev1.Pod{ds_terminated_pod},
+			rcs: []api.ReplicationController{rc},
+			args: []string{"node"},
+			expectFatal: false,
+			expectDelete: true,
+		},
 		{
 			description: "orphaned DS-managed pod",
 			node: node,
@@ -519,11 +614,21 @@ func TestDrain(t *testing.T) {
 			expectDelete: false,
 		},
 		{
-			description: "Job-managed pod",
+			description: "Job-managed pod with local storage",
 			node: node,
 			expected: cordoned_node,
 			pods: []corev1.Pod{job_pod},
 			rcs: []api.ReplicationController{rc},
+			args: []string{"node", "--force", "--delete-local-data=true"},
+			expectFatal: false,
+			expectDelete: true,
+		},
+		{
+			description: "Job-managed terminated pod",
+			node: node,
+			expected: cordoned_node,
+			pods: []corev1.Pod{terminated_job_pod_with_local_storage},
+			rcs: []api.ReplicationController{rc},
 			args: []string{"node"},
 			expectFatal: false,
 			expectDelete: true,
@@ -567,6 +672,16 @@ func TestDrain(t *testing.T) {
 			expectFatal: true,
 			expectDelete: false,
 		},
+		{
+			description: "terminated pod with emptyDir",
+			node: node,
+			expected: cordoned_node,
+			pods: []corev1.Pod{emptydir_terminated_pod},
+			rcs: []api.ReplicationController{rc},
+			args: []string{"node"},
+			expectFatal: false,
+			expectDelete: true,
+		},
 		{
 			description: "pod with EmptyDir and --delete-local-data",
 			node: node,
 			expected: cordoned_node,
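Both filters in this patch gate on the same "finished pod" phase check. A minimal, self-contained sketch of that predicate as a standalone helper is shown below; the helper name podIsFinished and the example main function are illustrative assumptions, not part of this patch.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// podIsFinished reports whether a pod has already run to completion.
// It mirrors the check added to daemonsetFilter and localStorageFilter above:
// pods in the Succeeded or Failed phase can be deleted during a drain without
// raising the DaemonSet or local-storage errors.
func podIsFinished(pod corev1.Pod) bool {
	return pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed
}

func main() {
	succeeded := corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodSucceeded}}
	running := corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodRunning}}
	fmt.Println(podIsFinished(succeeded), podIsFinished(running)) // true false
}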