Merge pull request #40592 from kubernetes/revert-40239-disk_pod_lifecycle

Revert "Delay deletion of pod from the API server until volumes are deleted"
Wojciech Tyczynski 2017-01-27 15:54:18 +01:00 committed by GitHub
commit f81e9fd000
14 changed files with 26 additions and 140 deletions
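Before the per-file hunks, a summary of what is being undone: the reverted PR (#40239) had the kubelet hold a pod's API object until the pod's volumes were unmounted, by making the status manager consult a PodDeletionSafetyProvider before issuing the delete. This revert returns to the older behavior, where the status manager checks only the deletion timestamp and container states inline. Below is a minimal, self-contained sketch of the two deletion gates; all types are stand-ins (the real code, visible in the hunks that follow, works on v1.Pod and also honors the KeepTerminatedPodVolumes kubelet setting, which this sketch omits):

```go
package main

import (
	"fmt"
	"time"
)

// Stand-ins for the v1.Pod fields involved.
type containerState struct{ Terminated, Waiting bool }

type pod struct {
	DeletionTimestamp *time.Time
	ContainerStatuses []containerState
	VolumesInUse      bool // stand-in for kl.podVolumesExist(pod.UID)
}

// notRunning mirrors the helper in this diff: true when every container is
// terminated or waiting, or when no statuses exist at all.
func notRunning(statuses []containerState) bool {
	for _, s := range statuses {
		if !s.Terminated && !s.Waiting {
			return false
		}
	}
	return true
}

// Pre-revert gate (the kubelet's OkToDeletePod): also waits for volume cleanup.
func okToDeletePre(p pod) bool {
	return p.DeletionTimestamp != nil && notRunning(p.ContainerStatuses) && !p.VolumesInUse
}

// Post-revert gate (inlined in the status manager's syncPod): only the
// deletion timestamp and container states are consulted.
func okToDeletePost(p pod) bool {
	return p.DeletionTimestamp != nil && notRunning(p.ContainerStatuses)
}

func main() {
	now := time.Now()
	p := pod{DeletionTimestamp: &now, VolumesInUse: true} // stopped, volumes not yet unmounted
	fmt.Println(okToDeletePre(p))  // false: keep the API object until volumes are gone
	fmt.Println(okToDeletePost(p)) // true: delete immediately
}
```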


@@ -179,7 +179,6 @@ go_test(
         "//pkg/kubelet/server/remotecommand:go_default_library",
         "//pkg/kubelet/server/stats:go_default_library",
         "//pkg/kubelet/status:go_default_library",
-        "//pkg/kubelet/status/testing:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/queue:go_default_library",
         "//pkg/kubelet/util/sliceutils:go_default_library",


@@ -697,7 +697,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 	}
 	klet.imageManager = imageManager
 
-	klet.statusManager = status.NewManager(kubeClient, klet.podManager, klet)
+	klet.statusManager = status.NewManager(kubeClient, klet.podManager)
 
 	klet.probeManager = prober.NewManager(
 		klet.statusManager,


@@ -727,37 +727,6 @@ func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool {
 	return false
 }
 
-// Returns true if all required node-level resources that a pod was consuming have been reclaimed by the kubelet.
-// Reclaiming resources is a prerequisite to deleting a pod from the API server.
-func (kl *Kubelet) OkToDeletePod(pod *v1.Pod) bool {
-	if pod.DeletionTimestamp == nil {
-		// We shouldn't delete pods whose DeletionTimestamp is not set
-		return false
-	}
-	if !notRunning(pod.Status.ContainerStatuses) {
-		// We shouldn't delete pods that still have running containers
-		glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
-		return false
-	}
-	if kl.podVolumesExist(pod.UID) && !kl.kubeletConfiguration.KeepTerminatedPodVolumes {
-		// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
-		glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))
-		return false
-	}
-	return true
-}
-
-// notRunning returns true if every status is terminated or waiting, or the status list
-// is empty.
-func notRunning(statuses []v1.ContainerStatus) bool {
-	for _, status := range statuses {
-		if status.State.Terminated == nil && status.State.Waiting == nil {
-			return false
-		}
-	}
-	return true
-}
-
 // filterOutTerminatedPods returns the given pods which the status manager
 // does not consider failed or succeeded.
 func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
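One subtlety in the notRunning helper removed here (and re-added verbatim to status_manager.go later in this diff): an empty status list counts as not running, so a pod whose containers were never started is deletable immediately, while a status whose State has no pointers set at all reads as still running. A small demonstration, sketched against the modern k8s.io/api/core/v1 types (this diff's era imported the equivalent types from k8s.io/kubernetes/pkg/api/v1):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// notRunning, copied from the hunk above.
func notRunning(statuses []v1.ContainerStatus) bool {
	for _, status := range statuses {
		if status.State.Terminated == nil && status.State.Waiting == nil {
			return false
		}
	}
	return true
}

func main() {
	cases := []struct {
		name     string
		statuses []v1.ContainerStatus
	}{
		{"no statuses reported", nil}, // true: empty list counts as not running
		{"waiting", []v1.ContainerStatus{{State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}}}},
		{"terminated", []v1.ContainerStatus{{State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}}}}},
		{"running", []v1.ContainerStatus{{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}}}},
		{"no state set", []v1.ContainerStatus{{}}}, // false: an empty State reads as running
	}
	for _, c := range cases {
		fmt.Printf("%-20s notRunning=%v\n", c.name, notRunning(c.statuses))
	}
}
```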


@@ -59,7 +59,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/secret"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
@@ -180,7 +179,7 @@ func newTestKubeletWithImageList(
 	}
 	kubelet.secretManager = secretManager
 	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager)
-	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
+	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
 	kubelet.containerRefManager = kubecontainer.NewRefManager()
 	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
 	if err != nil {


@@ -57,7 +57,6 @@ go_test(
         "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/prober/results:go_default_library",
         "//pkg/kubelet/status:go_default_library",
-        "//pkg/kubelet/status/testing:go_default_library",
         "//pkg/probe:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//pkg/util/intstr:go_default_library",


@@ -28,7 +28,6 @@ import (
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	"k8s.io/kubernetes/pkg/probe"
 	"k8s.io/kubernetes/pkg/util/exec"
 )
@@ -103,7 +102,7 @@ func newTestManager() *manager {
 	// Add test pod to pod manager, so that status manager can get the pod from pod manager if needed.
 	podManager.AddPod(getTestPod())
 	m := NewManager(
-		status.NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}),
+		status.NewManager(&fake.Clientset{}, podManager),
 		results.NewManager(),
 		nil, // runner
 		refManager,


@@ -31,7 +31,6 @@ import (
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	"k8s.io/kubernetes/pkg/probe"
 	"k8s.io/kubernetes/pkg/util/exec"
 )
@@ -118,7 +117,7 @@ func TestDoProbe(t *testing.T) {
 		}
 
 		// Clean up.
-		m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil), &statustest.FakePodDeletionSafetyProvider{})
+		m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil))
 		resultsManager(m, probeType).Remove(testContainerID)
 	}
 }


@@ -43,7 +43,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/secret"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
 	"k8s.io/kubernetes/pkg/kubelet/status"
-	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -77,7 +76,7 @@ func TestRunOnce(t *testing.T) {
 		cadvisor:            cadvisor,
 		nodeLister:          testNodeLister{},
 		nodeInfo:            testNodeInfo{},
-		statusManager:       status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}),
+		statusManager:       status.NewManager(nil, podManager),
 		containerRefManager: kubecontainer.NewRefManager(),
 		podManager:          podManager,
 		os:                  &containertest.FakeOS{},


@@ -50,7 +50,6 @@ go_test(
         "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/pod/testing:go_default_library",
         "//pkg/kubelet/secret:go_default_library",
-        "//pkg/kubelet/status/testing:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//vendor:github.com/stretchr/testify/assert",
         "//vendor:k8s.io/apimachinery/pkg/api/errors",
@@ -70,9 +69,6 @@ filegroup(
 
 filegroup(
     name = "all-srcs",
-    srcs = [
-        ":package-srcs",
-        "//pkg/kubelet/status/testing:all-srcs",
-    ],
+    srcs = [":package-srcs"],
     tags = ["automanaged"],
 )


@@ -66,7 +66,6 @@ type manager struct {
 	// Map from (mirror) pod UID to latest status version successfully sent to the API server.
 	// apiStatusVersions must only be accessed from the sync thread.
 	apiStatusVersions map[types.UID]uint64
-	podDeletionSafety PodDeletionSafetyProvider
 }
 
 // PodStatusProvider knows how to provide status for a pod. It's intended to be used by other components
@@ -77,12 +76,6 @@ type PodStatusProvider interface {
 	GetPodStatus(uid types.UID) (v1.PodStatus, bool)
 }
 
-// An object which provides guarantees that a pod can be safely deleted.
-type PodDeletionSafetyProvider interface {
-	// A function which returns true if the pod can safely be deleted
-	OkToDeletePod(pod *v1.Pod) bool
-}
-
 // Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with
 // the latest v1.PodStatus. It also syncs updates back to the API server.
 type Manager interface {
@@ -109,14 +102,13 @@ type Manager interface {
 
 const syncPeriod = 10 * time.Second
 
-func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider) Manager {
+func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Manager {
 	return &manager{
 		kubeClient:        kubeClient,
 		podManager:        podManager,
 		podStatuses:       make(map[types.UID]versionedPodStatus),
 		podStatusChannel:  make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses
 		apiStatusVersions: make(map[types.UID]uint64),
-		podDeletionSafety: podDeletionSafety,
 	}
 }
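Worth connecting the dots: the kubelet.go hunk near the top of this commit passed `klet` itself as the third NewManager argument, i.e. *Kubelet satisfied PodDeletionSafetyProvider through its OkToDeletePod method, and this revert deletes the interface and the constructor parameter together. A self-contained sketch of that injection shape; every type here is an illustrative stand-in, not the real kubelet type:

```go
package main

import "fmt"

type pod struct{ name string }

// The interface this revert deletes from pkg/kubelet/status.
type podDeletionSafetyProvider interface {
	okToDeletePod(p *pod) bool
}

// statusManager stands in for the manager struct above.
type statusManager struct {
	podDeletionSafety podDeletionSafetyProvider
}

func newManager(p podDeletionSafetyProvider) *statusManager {
	return &statusManager{podDeletionSafety: p}
}

// kubelet stands in for *Kubelet, which satisfied the interface with its own
// OkToDeletePod method; NewMainKubelet passed `klet` as the provider.
type kubelet struct{ volumesGone bool }

func (kl *kubelet) okToDeletePod(p *pod) bool { return kl.volumesGone }

func main() {
	kl := &kubelet{volumesGone: false}
	m := newManager(kl) // same shape as status.NewManager(kubeClient, podManager, klet)
	fmt.Println(m.podDeletionSafety.okToDeletePod(&pod{name: "web-0"})) // false
}
```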
@@ -388,7 +380,7 @@ func (m *manager) syncBatch() {
 			}
 			syncedUID = mirrorUID
 		}
-		if m.needsUpdate(syncedUID, status) || m.couldBeDeleted(uid, status.status) {
+		if m.needsUpdate(syncedUID, status) {
 			updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status})
 		} else if m.needsReconcile(uid, status.status) {
 			// Delete the apiStatusVersions here to force an update on the pod status
@@ -441,7 +433,11 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 		// We don't handle graceful deletion of mirror pods.
 		return
 	}
-	if !m.podDeletionSafety.OkToDeletePod(pod) {
+	if pod.DeletionTimestamp == nil {
+		return
+	}
+	if !notRunning(pod.Status.ContainerStatuses) {
+		glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
 		return
 	}
 	deleteOptions := metav1.NewDeleteOptions(0)
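In both the old and new versions of syncPod, the eventual delete is issued with metav1.NewDeleteOptions(0), i.e. a zero-second grace period: graceful termination has already run on the node by this point, so the API call only removes the record. A small sketch of what that options value contains (metav1 here is k8s.io/apimachinery/pkg/apis/meta/v1, as in modern trees; the exact import path in this era may have differed):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// NewDeleteOptions(0) builds a DeleteOptions whose GracePeriodSeconds is a
	// pointer to 0 — "delete immediately, no further grace".
	opts := metav1.NewDeleteOptions(0)
	fmt.Println(*opts.GracePeriodSeconds) // 0
}
```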
@@ -466,15 +462,6 @@ func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool {
 	return !ok || latest < status.version
 }
 
-func (m *manager) couldBeDeleted(uid types.UID, status v1.PodStatus) bool {
-	// The pod could be a static pod, so we should translate first.
-	pod, ok := m.podManager.GetPodByUID(uid)
-	if !ok {
-		return false
-	}
-	return !kubepod.IsMirrorPod(pod) && m.podDeletionSafety.OkToDeletePod(pod)
-}
-
 // needsReconcile compares the given status with the status in the pod manager (which
 // in fact comes from apiserver), returns whether the status needs to be reconciled with
 // the apiserver. Now when pod status is inconsistent between apiserver and kubelet,
@@ -575,6 +562,17 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
 	return status
 }
 
+// notRunning returns true if every status is terminated or waiting, or the status list
+// is empty.
+func notRunning(statuses []v1.ContainerStatus) bool {
+	for _, status := range statuses {
+		if status.State.Terminated == nil && status.State.Waiting == nil {
+			return false
+		}
+	}
+	return true
+}
+
 func copyStatus(source *v1.PodStatus) (v1.PodStatus, error) {
 	clone, err := api.Scheme.DeepCopy(source)
 	if err != nil {


@@ -39,7 +39,6 @@ import (
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
 	kubesecret "k8s.io/kubernetes/pkg/kubelet/secret"
-	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
@@ -75,7 +74,7 @@ func (m *manager) testSyncBatch() {
 func newTestManager(kubeClient clientset.Interface) *manager {
 	podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager())
 	podManager.AddPod(getTestPod())
-	return NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}).(*manager)
+	return NewManager(kubeClient, podManager).(*manager)
 }
 
 func generateRandomMessage() string {


@@ -1,31 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-licenses(["notice"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-)
-
-go_library(
-    name = "go_default_library",
-    srcs = ["fake_pod_deletion_safety.go"],
-    tags = ["automanaged"],
-    deps = [
-        "//pkg/api/v1:go_default_library",
-        "//pkg/kubelet/pod:go_default_library",
-    ],
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-)


@@ -1,28 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
-	"k8s.io/kubernetes/pkg/api/v1"
-	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
-)
-
-type FakePodDeletionSafetyProvider struct{}
-
-func (f *FakePodDeletionSafetyProvider) OkToDeletePod(pod *v1.Pod) bool {
-	return !kubepod.IsMirrorPod(pod) && pod.DeletionTimestamp != nil
-}
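The fake being deleted above approved deletion for anything that is not a mirror pod and has a deletion timestamp — the loosest policy a test can use. A test wanting to exercise the opposite path (the status manager refusing to delete while volumes are "in use") could have used a one-line companion fake; the DenyingPodDeletionSafetyProvider below is purely illustrative and was never part of this tree:

```go
package testing

import "k8s.io/kubernetes/pkg/api/v1"

// DenyingPodDeletionSafetyProvider is a hypothetical fake that never approves
// deletion, letting a test assert that syncPod holds the API object.
type DenyingPodDeletionSafetyProvider struct{}

func (f *DenyingPodDeletionSafetyProvider) OkToDeletePod(pod *v1.Pod) bool {
	return false
}
```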


@@ -135,18 +135,7 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
 }
 
 func isPodTerminated(pod *v1.Pod) bool {
-	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(pod.Status.ContainerStatuses))
-}
-
-// notRunning returns true if every status is terminated or waiting, or the status list
-// is empty.
-func notRunning(statuses []v1.ContainerStatus) bool {
-	for _, status := range statuses {
-		if status.State.Terminated == nil && status.State.Waiting == nil {
-			return false
-		}
-	}
-	return true
+	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
 }
 
 // Iterate through all pods and add to desired state of world if they don't
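The practical effect of this last hunk: after the revert, the volume manager's populator no longer treats "deleted and no longer running" pods as terminated and keys purely off the pod phase. A runnable sketch of the two predicates side by side (stand-in types; the real populator plumbing and KeepTerminatedPodVolumes handling are omitted):

```go
package main

import (
	"fmt"
	"time"
)

type phase string

const (
	podRunning   phase = "Running"
	podFailed    phase = "Failed"
	podSucceeded phase = "Succeeded"
)

type pod struct {
	Phase             phase
	DeletionTimestamp *time.Time
	AllContainersDown bool // stand-in for notRunning(pod.Status.ContainerStatuses)
}

// Pre-revert: a deleted pod whose containers have stopped counts as terminated,
// so its volumes are torn down promptly.
func isPodTerminatedPre(p pod) bool {
	return p.Phase == podFailed || p.Phase == podSucceeded ||
		(p.DeletionTimestamp != nil && p.AllContainersDown)
}

// Post-revert: only the terminal phases count.
func isPodTerminatedPost(p pod) bool {
	return p.Phase == podFailed || p.Phase == podSucceeded
}

func main() {
	now := time.Now()
	deleted := pod{Phase: podRunning, DeletionTimestamp: &now, AllContainersDown: true}
	fmt.Println(isPodTerminatedPre(deleted))  // true
	fmt.Println(isPodTerminatedPost(deleted)) // false
}
```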