mirror of https://github.com/k3s-io/k3s
Ensure that init containers are preserved during pruning

Pods with multiple init containers were getting the wrong containers pruned. Fix an error message and add a test.

parent 7ad337a2c2
commit 223542690a
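Background on the fix: the old guard in the status loop only checked that a status belonged to some init container, so in a pod with more than one init container every pass of the outer name loop walked the same combined status list and pruned everything after the first exited status it saw, regardless of name. The hunks below add a status.Name != name check so each init container's executions are counted and pruned independently. A minimal standalone sketch of that per-name selection, using illustrative types and names rather than the kubelet's own:

package main

import "fmt"

// containerStatus is an illustrative stand-in for kubecontainer.ContainerStatus.
type containerStatus struct {
	Name   string // init container name from the pod spec
	ID     string // runtime container ID
	Exited bool   // true once the container has terminated
}

// pruneCandidates mirrors the fixed logic: statuses are assumed to be ordered
// newest first, so for each init container name the first exited status is
// kept and every older execution is marked for removal.
func pruneCandidates(initNames []string, statuses []containerStatus) []string {
	var remove []string
	for _, name := range initNames {
		count := 0
		for _, status := range statuses {
			// The fix: skip statuses that belong to a different init container.
			if status.Name != name || !status.Exited {
				continue
			}
			count++
			if count == 1 {
				continue // newest execution of this init container is preserved
			}
			remove = append(remove, status.ID)
		}
	}
	return remove
}

func main() {
	statuses := []containerStatus{
		{Name: "init2", ID: "init2-new-1", Exited: true},
		{Name: "init1", ID: "init1-new-1", Exited: true},
		{Name: "init1", ID: "init1-old-1", Exited: true},
	}
	// Prints [init1-old-1]: only the older init1 execution is pruned; the
	// newest execution of each init container survives.
	fmt.Println(pruneCandidates([]string{"init1", "init2"}, statuses))
}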
@@ -2095,7 +2095,7 @@ func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Po
 // of outstanding init containers still present. This reduces load on the container garbage collector
 // by only preserving the most recent terminated init container.
 func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) {
-	// only the last execution of an init container should be preserved, and only preserve it if it is in the
+	// only the last execution of each init container should be preserved, and only preserve it if it is in the
 	// list of init containers to keep.
 	initContainerNames := sets.NewString()
 	for _, container := range pod.Spec.InitContainers {
@@ -2104,11 +2104,11 @@ func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus
 	for name := range initContainerNames {
 		count := 0
 		for _, status := range podStatus.ContainerStatuses {
-			if !initContainerNames.Has(status.Name) || status.State != kubecontainer.ContainerStateExited {
+			if status.Name != name || !initContainerNames.Has(status.Name) || status.State != kubecontainer.ContainerStateExited {
 				continue
 			}
 			count++
-			// keep the first init container we see
+			// keep the first init container for this name
 			if count == 1 {
 				continue
 			}
@@ -2125,7 +2125,7 @@ func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus
 					count--
 					continue
 				}
-				utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", name, err, format.Pod(pod)))
+				utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
 				// TODO: report serious errors
 				continue
 			}

@@ -1892,3 +1892,37 @@ func TestGetPodStatusNoSuchContainer(t *testing.T) {
 		"create", "start", "inspect_container",
 	})
 }
+
+func TestPruneInitContainers(t *testing.T) {
+	dm, fake := newTestDockerManager()
+	pod := &api.Pod{
+		Spec: api.PodSpec{
+			InitContainers: []api.Container{
+				{Name: "init1"},
+				{Name: "init2"},
+			},
+		},
+	}
+	status := &kubecontainer.PodStatus{
+		ContainerStatuses: []*kubecontainer.ContainerStatus{
+			{Name: "init2", ID: kubecontainer.ContainerID{ID: "init2-new-1"}, State: kubecontainer.ContainerStateExited},
+			{Name: "init1", ID: kubecontainer.ContainerID{ID: "init1-new-1"}, State: kubecontainer.ContainerStateExited},
+			{Name: "init1", ID: kubecontainer.ContainerID{ID: "init1-new-2"}, State: kubecontainer.ContainerStateExited},
+			{Name: "init1", ID: kubecontainer.ContainerID{ID: "init1-old-1"}, State: kubecontainer.ContainerStateExited},
+			{Name: "init2", ID: kubecontainer.ContainerID{ID: "init2-old-1"}, State: kubecontainer.ContainerStateExited},
+		},
+	}
+	fake.ExitedContainerList = []dockertypes.Container{
+		{ID: "init1-new-1"},
+		{ID: "init1-new-2"},
+		{ID: "init1-old-1"},
+		{ID: "init2-new-1"},
+		{ID: "init2-old-1"},
+	}
+	keep := map[kubecontainer.DockerID]int{}
+	dm.pruneInitContainersBeforeStart(pod, status, keep)
+	sort.Sort(sort.StringSlice(fake.Removed))
+	if !reflect.DeepEqual([]string{"init1-new-2", "init1-old-1", "init2-old-1"}, fake.Removed) {
+		t.Fatal(fake.Removed)
+	}
+}
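The expected slice in the assertion follows from the order of ContainerStatuses, which lists executions newest first: the first exited status for each name (init2-new-1 and init1-new-1) is kept, the other three IDs are removed, and fake.Removed is sorted for a stable comparison. The same expectation can be reproduced with the illustrative pruneCandidates helper sketched above (add "sort" to that file's imports); this is a sanity check on the fixture, not part of the test itself:

	removed := pruneCandidates(
		[]string{"init1", "init2"},
		[]containerStatus{
			{Name: "init2", ID: "init2-new-1", Exited: true},
			{Name: "init1", ID: "init1-new-1", Exited: true},
			{Name: "init1", ID: "init1-new-2", Exited: true},
			{Name: "init1", ID: "init1-old-1", Exited: true},
			{Name: "init2", ID: "init2-old-1", Exited: true},
		},
	)
	sort.Strings(removed)
	fmt.Println(removed) // [init1-new-2 init1-old-1 init2-old-1], matching the test's expected removals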