mirror of https://github.com/k3s-io/k3s
Delete containers when pod is deleted
parent 2ae16a2e60
commit 8bc4444f16
@@ -695,3 +695,7 @@ func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc {
 	}
 	return resourceToRankFunc
 }
+
+func PodIsEvicted(podStatus api.PodStatus) bool {
+	return podStatus.Phase == api.PodFailed && podStatus.Reason == reason
+}
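The new eviction helper is a pure predicate over the pod's reported status. Below is a minimal standalone sketch of the same check, not the kubelet code itself: the local podStatus type and the "Evicted" string stand in for api.PodStatus and the eviction package's unexported reason constant, both of which are assumptions here.

package main

import "fmt"

type podStatus struct {
	Phase  string
	Reason string
}

// Assumed stand-in for the eviction package's unexported `reason` constant.
const evictionReason = "Evicted"

// podIsEvicted mirrors the shape of the new PodIsEvicted helper:
// a pod counts as evicted only if it failed *and* the reason matches.
func podIsEvicted(s podStatus) bool {
	return s.Phase == "Failed" && s.Reason == evictionReason
}

func main() {
	evicted := podStatus{Phase: "Failed", Reason: evictionReason}
	oomKilled := podStatus{Phase: "Failed", Reason: "OOMKilled"}
	fmt.Println(podIsEvicted(evicted))   // true
	fmt.Println(podIsEvicted(oomKilled)) // false
}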
@@ -2188,11 +2188,10 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
 			glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
 			handler.HandlePodSyncs([]*api.Pod{pod})
 		}
+
 		if e.Type == pleg.ContainerDied {
-			if podStatus, err := kl.podCache.Get(e.ID); err == nil {
-				if containerID, ok := e.Data.(string); ok {
-					kl.containerDeletor.deleteContainersInPod(containerID, podStatus)
-				}
+			if containerID, ok := e.Data.(string); ok {
+				kl.cleanUpContainersInPod(e.ID, containerID)
 			}
 		}
 	case <-syncCh:
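In the sync loop, a ContainerDied PLEG event now only needs the exited container's ID, which arrives in the event's Data payload; the kubelet asserts it to a string and delegates cleanup to cleanUpContainersInPod. Here is a self-contained sketch of that dispatch shape; lifecycleEvent, containerDied, and cleanUpContainersInPod are local stand-ins, not the real pleg or Kubelet APIs.

package main

import "fmt"

type eventType string

const containerDied eventType = "ContainerDied"

type lifecycleEvent struct {
	PodID string
	Type  eventType
	Data  interface{} // carries the container ID for ContainerDied events
}

func cleanUpContainersInPod(podID, exitedContainerID string) {
	fmt.Printf("cleaning up dead containers in pod %s (exited: %s)\n", podID, exitedContainerID)
}

func handle(e lifecycleEvent) {
	if e.Type == containerDied {
		// Only act when Data really is the container ID string.
		if containerID, ok := e.Data.(string); ok {
			cleanUpContainersInPod(e.PodID, containerID)
		}
	}
}

func main() {
	handle(lifecycleEvent{PodID: "pod-1", Type: containerDied, Data: "c0ffee"})
	handle(lifecycleEvent{PodID: "pod-1", Type: containerDied, Data: 42}) // ignored: wrong payload type
}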
@@ -2922,6 +2921,16 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
 	server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port, kl.containerRuntime)
 }
 
+// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
+func (kl *Kubelet) cleanUpContainersInPod(podId types.UID, exitedContainerID string) {
+	if podStatus, err := kl.podCache.Get(podId); err == nil {
+		if status, ok := kl.statusManager.GetPodStatus(podId); ok {
+			// If a pod is evicted, we can delete all the dead containers.
+			kl.containerDeletor.deleteContainersInPod(exitedContainerID, podStatus, eviction.PodIsEvicted(status))
+		}
+	}
+}
+
 // isSyncPodWorthy filters out events that are not worthy of pod syncing
 func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
 	// ContainerRemoved doesn't affect pod state
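cleanUpContainersInPod consults the status manager so that an evicted pod loses every dead container, while a healthy pod keeps the configured number around for debugging. A tiny sketch of that retention decision follows; effectiveContainersToKeep is a hypothetical helper name, not part of the commit.

package main

import "fmt"

// effectiveContainersToKeep models the removeAll switch added in this commit:
// once a pod has been evicted, no dead containers are worth keeping.
func effectiveContainersToKeep(configured int, podEvicted bool) int {
	if podEvicted {
		return 0 // evicted pod: every dead container is eligible for deletion
	}
	return configured
}

func main() {
	fmt.Println(effectiveContainersToKeep(2, false)) // 2: keep the latest two dead containers
	fmt.Println(effectiveContainersToKeep(2, true))  // 0: delete them all
}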
@@ -59,7 +59,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
 }
 
 // getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name inferred from exitedContainerID, ordered by the creation time from the latest to the earliest.
-func (p *podContainerDeletor) getContainersToDeleteInPod(exitedContainerID string, podStatus *kubecontainer.PodStatus) containerStatusbyCreatedList {
+func getContainersToDeleteInPod(exitedContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
 	var matchedContainer *kubecontainer.ContainerStatus
 	var exitedContainers []*kubecontainer.ContainerStatus
 	// Find all exited containers in the pod
@@ -84,17 +84,21 @@ func (p *podContainerDeletor) getContainersToDeleteInPod(exitedContainerID strin
 			candidates = append(candidates, containerStatus)
 		}
 	}
 
-	if len(candidates) <= p.containersToKeep {
+	if len(candidates) <= containersToKeep {
 		return containerStatusbyCreatedList{}
 	}
 	sort.Sort(candidates)
-	return candidates[p.containersToKeep:]
+	return candidates[containersToKeep:]
 }
 
 // deleteContainersInPod issues container deletion requests for containers selected by getContainersToDeleteInPod.
-func (p *podContainerDeletor) deleteContainersInPod(exitedContainerID string, podStatus *kubecontainer.PodStatus) {
-	for _, candidate := range p.getContainersToDeleteInPod(exitedContainerID, podStatus) {
+func (p *podContainerDeletor) deleteContainersInPod(exitedContainerID string, podStatus *kubecontainer.PodStatus, removeAll bool) {
+	containersToKeep := p.containersToKeep
+	if removeAll {
+		containersToKeep = 0
+	}
+	for _, candidate := range getContainersToDeleteInPod(exitedContainerID, podStatus, containersToKeep) {
 		select {
 		case p.worker <- candidate.ID:
 		default:
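getContainersToDeleteInPod now takes the retention count as a parameter: it collects the exited instances that share a name with the container that just died, orders them newest first by creation time, shields the newest containersToKeep, and returns the remainder for deletion. The following standalone sketch shows that selection policy under assumed local types; containerStatus and pickContainersToDelete are stand-ins, not the kubelet's types.

package main

import (
	"fmt"
	"sort"
	"time"
)

type containerStatus struct {
	ID        string
	Name      string
	State     string // "exited", "running", ...
	CreatedAt time.Time
}

// pickContainersToDelete returns the dead instances of a container that are
// old enough to delete, keeping the newest containersToKeep of them.
func pickContainersToDelete(name string, statuses []containerStatus, containersToKeep int) []containerStatus {
	var candidates []containerStatus
	for _, s := range statuses {
		if s.Name == name && s.State == "exited" {
			candidates = append(candidates, s)
		}
	}
	if len(candidates) <= containersToKeep {
		return nil
	}
	// Newest first, so the tail [containersToKeep:] holds the oldest dead instances.
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CreatedAt.After(candidates[j].CreatedAt)
	})
	return candidates[containersToKeep:]
}

func main() {
	now := time.Now()
	statuses := []containerStatus{
		{ID: "1", Name: "app", State: "exited", CreatedAt: now.Add(-3 * time.Hour)},
		{ID: "2", Name: "app", State: "exited", CreatedAt: now.Add(-2 * time.Hour)},
		{ID: "3", Name: "app", State: "exited", CreatedAt: now.Add(-1 * time.Hour)},
		{ID: "4", Name: "app", State: "running", CreatedAt: now},
	}
	for _, c := range pickContainersToDelete("app", statuses, 1) {
		fmt.Println("would delete container", c.ID) // prints "2" then "1"
	}
}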
@@ -22,7 +22,6 @@ import (
 	"time"
 
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
-	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 )
 
 func testGetContainersToDeleteInPod(t *testing.T) {
@@ -61,9 +60,28 @@ func testGetContainersToDeleteInPod(t *testing.T) {
 		},
 	}
 
-	expectedCandidates := []*kubecontainer.ContainerStatus{pod.ContainerStatuses[2], pod.ContainerStatuses[1]}
-	candidates := newPodContainerDeletor(&containertest.FakeRuntime{}, 1).getContainersToDeleteInPod("2", &pod)
-	if !reflect.DeepEqual(candidates, expectedCandidates) {
-		t.Errorf("expected %v got %v", expectedCandidates, candidates)
+	testCases := []struct {
+		containersToKeep           int
+		expectedContainersToDelete []*kubecontainer.ContainerStatus
+	}{
+		{
+			0,
+			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[3], pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
+		},
+		{
+			1,
+			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[2], pod.ContainerStatuses[1]},
+		},
+		{
+			2,
+			[]*kubecontainer.ContainerStatus{pod.ContainerStatuses[1]},
+		},
+	}
+
+	for _, test := range testCases {
+		candidates := getContainersToDeleteInPod("4", &pod, test.containersToKeep)
+		if !reflect.DeepEqual(candidates, test.expectedContainersToDelete) {
+			t.Errorf("expected %v got %v", test.expectedContainersToDelete, candidates)
+		}
 	}
 }
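The rewritten test is table-driven: one fixture of dead containers, three retention settings, and a reflect.DeepEqual comparison per case. The sketch below shows the same pattern in a self-contained form; keepNewest is a hypothetical stand-in for getContainersToDeleteInPod, and the file would need to be saved as a _test.go file to run with go test.

package main

import (
	"reflect"
	"testing"
)

// keepNewest returns the entries after the first `keep` elements of an
// already newest-first slice, i.e. the ones that would be deleted.
func keepNewest(newestFirst []string, keep int) []string {
	if len(newestFirst) <= keep {
		return nil
	}
	return newestFirst[keep:]
}

func TestKeepNewest(t *testing.T) {
	dead := []string{"c3", "c2", "c1"} // newest first
	testCases := []struct {
		keep     int
		expected []string
	}{
		{0, []string{"c3", "c2", "c1"}},
		{1, []string{"c2", "c1"}},
		{2, []string{"c1"}},
	}
	for _, test := range testCases {
		got := keepNewest(dead, test.keep)
		if !reflect.DeepEqual(got, test.expected) {
			t.Errorf("keep=%d: expected %v got %v", test.keep, test.expected, got)
		}
	}
}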