CRI: Add init containers

pull/6/head
Pengfei Ni 2016-09-23 17:37:32 +08:00
parent 08dd569424
commit 9d42d450f8
6 changed files with 391 additions and 44 deletions

View File

@@ -197,11 +197,12 @@ func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeApi.PodSandboxFilter)
}
result = append(result, &runtimeApi.PodSandbox{
-Id:        s.Id,
-Metadata:  s.Metadata,
-State:     s.State,
-CreatedAt: s.CreatedAt,
-Labels:    s.Labels,
+Id:          s.Id,
+Metadata:    s.Metadata,
+State:       s.State,
+CreatedAt:   s.CreatedAt,
+Labels:      s.Labels,
+Annotations: s.Annotations,
})
}
@@ -321,6 +322,7 @@ func (r *FakeRuntimeService) ListContainers(filter *runtimeApi.ContainerFilter)
Image: s.Image,
ImageRef: s.ImageRef,
Labels: s.Labels,
Annotations: s.Annotations,
})
}

View File

@@ -123,6 +123,31 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeApi.PodSand
}, nil
}
// getContainerSpec gets the container spec by containerName.
func getContainerSpec(pod *api.Pod, containerName string) *api.Container {
for i, c := range pod.Spec.Containers {
if containerName == c.Name {
return &pod.Spec.Containers[i]
}
}
for i, c := range pod.Spec.InitContainers {
if containerName == c.Name {
return &pod.Spec.InitContainers[i]
}
}
return nil
}
// isContainerFailed returns true if the container has exited and its exit code is not zero.
func isContainerFailed(status *kubecontainer.ContainerStatus) bool {
if status.State == kubecontainer.ContainerStateExited && status.ExitCode != 0 {
return true
}
return false
}
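As an aside, the lookup above is easy to exercise in isolation. Below is a minimal, self-contained sketch using simplified stand-in types (a hypothetical Pod/Container pair, not the real k8s.io/kubernetes/pkg/api types); the search order matters, since regular containers are checked before init containers, and a nil result is what lets killContainer fall back to restoring the spec from container labels:

package main

import "fmt"

// Hypothetical stand-ins for api.Container, api.PodSpec and api.Pod;
// only the fields the lookup touches are modeled.
type Container struct{ Name string }

type PodSpec struct {
	Containers     []Container
	InitContainers []Container
}

type Pod struct{ Spec PodSpec }

// getContainerSpec mirrors the helper above: regular containers are
// searched first, then init containers, and a pointer into the spec
// slice is returned so callers see the original entry.
func getContainerSpec(pod *Pod, containerName string) *Container {
	for i, c := range pod.Spec.Containers {
		if containerName == c.Name {
			return &pod.Spec.Containers[i]
		}
	}
	for i, c := range pod.Spec.InitContainers {
		if containerName == c.Name {
			return &pod.Spec.InitContainers[i]
		}
	}
	return nil
}

func main() {
	pod := &Pod{Spec: PodSpec{
		Containers:     []Container{{Name: "app"}},
		InitContainers: []Container{{Name: "init1"}},
	}}
	fmt.Println(getContainerSpec(pod, "init1").Name) // init1
	fmt.Println(getContainerSpec(pod, "missing"))    // <nil>
}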
// milliCPUToShares converts milliCPU to CPU shares
func milliCPUToShares(milliCPU int64) int64 {
if milliCPU == 0 {

View File

@@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/types"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/term"
)
@@ -479,12 +480,7 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
func (m *kubeGenericRuntimeManager) killContainer(pod *api.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error {
var containerSpec *api.Container
if pod != nil {
-for i, c := range pod.Spec.Containers {
-if containerName == c.Name {
-containerSpec = &pod.Spec.Containers[i]
-break
-}
-}
+containerSpec = getContainerSpec(pod, containerName)
} else {
// Restore necessary information if one of the specs is nil.
restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(containerID)
@@ -544,6 +540,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *api.Pod, r
go func(container *kubecontainer.Container) {
defer utilruntime.HandleCrash()
defer wg.Done()
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name)
if err := m.killContainer(pod, container.ID, container.Name, "Need to kill Pod", gracePeriodOverride); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
@@ -560,6 +557,94 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *api.Pod, r
return
}
// pruneInitContainersBeforeStart ensures that, before we begin creating init containers, we have
// reduced the number of outstanding terminated init containers still present. This reduces load on
// the container garbage collector by preserving only the most recently terminated instance of each
// init container.
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) {
// the most recent execution of each init container is preserved unconditionally; older
// executions are preserved only if they appear in the list of init containers to keep.
initContainerNames := sets.NewString()
for _, container := range pod.Spec.InitContainers {
initContainerNames.Insert(container.Name)
}
for name := range initContainerNames {
count := 0
for _, status := range podStatus.ContainerStatuses {
if status.Name != name || !initContainerNames.Has(status.Name) || status.State != kubecontainer.ContainerStateExited {
continue
}
count++
// keep the first init container status seen for this name (the most recent attempt)
if count == 1 {
continue
}
// if there is a reason to preserve the older container, do so
if _, ok := initContainersToKeep[status.ID]; ok {
continue
}
// prune all other init containers that match this container name
glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count)
if err := m.runtimeService.RemoveContainer(status.ID.ID); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
continue
}
// remove any references to this container
if _, ok := m.containerRefManager.GetRef(status.ID); ok {
m.containerRefManager.ClearRef(status.ID)
} else {
glog.Warningf("No ref for container %q", status.ID)
}
}
}
}
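The pruning policy reduces to: for each init container name, keep the first exited status encountered (statuses are assumed here to be ordered most recent first, as the kubelet reports them) plus anything pinned in initContainersToKeep, and remove every other exited instance. A compact, self-contained sketch of just that selection, with hypothetical simplified types:

package main

import "fmt"

type containerID string

type initStatus struct {
	Name   string
	ID     containerID
	Exited bool
}

// pruneCandidates returns the IDs pruneInitContainersBeforeStart would
// remove: every exited instance of an init container name except the
// first one seen (the most recent), unless the keep map pins it.
func pruneCandidates(initNames map[string]bool, statuses []initStatus, keep map[containerID]bool) []containerID {
	count := map[string]int{}
	var remove []containerID
	for _, s := range statuses { // assumed ordered most recent first
		if !initNames[s.Name] || !s.Exited {
			continue
		}
		count[s.Name]++
		if count[s.Name] == 1 || keep[s.ID] {
			continue // most recent instance, or explicitly kept
		}
		remove = append(remove, s.ID)
	}
	return remove
}

func main() {
	names := map[string]bool{"init1": true}
	statuses := []initStatus{
		{Name: "init1", ID: "init1_2", Exited: true},
		{Name: "init1", ID: "init1_1", Exited: true},
		{Name: "init1", ID: "init1_0", Exited: true},
	}
	fmt.Println(pruneCandidates(names, statuses, nil)) // [init1_1 init1_0]
}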
// findNextInitContainerToRun returns the status of the last failed container, the
// next init container to start, and done=true if there are no further init containers.
// status is non-nil only if an init container has failed, in which case next will
// point to that same container.
func findNextInitContainerToRun(pod *api.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *api.Container, done bool) {
if len(pod.Spec.InitContainers) == 0 {
return nil, nil, true
}
// If there are failed containers, return the status of the last failed one.
for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- {
container := &pod.Spec.InitContainers[i]
status := podStatus.FindContainerStatusByName(container.Name)
if status != nil && isContainerFailed(status) {
return status, container, false
}
}
// There are no failed containers now.
for i := len(pod.Spec.InitContainers) - 1; i >= 0; i-- {
container := &pod.Spec.InitContainers[i]
status := podStatus.FindContainerStatusByName(container.Name)
if status == nil {
continue
}
// container is still running, return not done.
if status.State == kubecontainer.ContainerStateRunning {
return nil, nil, false
}
if status.State == kubecontainer.ContainerStateExited {
// all init containers successful
if i == (len(pod.Spec.InitContainers) - 1) {
return nil, nil, true
}
// all containers up to i successful, go to i+1
return nil, &pod.Spec.InitContainers[i+1], false
}
}
return nil, &pod.Spec.InitContainers[0], false
}
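The three return values encode a small state machine over the init container list. A condensed, self-contained sketch of the same decisions (initState and nextInit are hypothetical stand-ins; indexes replace the *api.Container pointers):

package main

import "fmt"

// Hypothetical per-init-container state, condensed from
// kubecontainer.ContainerStatus.
type initState int

const (
	noStatus  initState = iota // container has not run yet
	running                    // still executing
	succeeded                  // exited with code 0
	failed                     // exited with a non-zero code
)

// nextInit mirrors findNextInitContainerToRun over a slice ordered like
// pod.Spec.InitContainers. It returns the index to start next (-1 for
// none) and whether initialization is complete. A failed container is
// returned as "next" so the caller can restart it.
func nextInit(states []initState) (next int, done bool) {
	if len(states) == 0 {
		return -1, true
	}
	// The most recent failure wins: restart that container first.
	for i := len(states) - 1; i >= 0; i-- {
		if states[i] == failed {
			return i, false
		}
	}
	for i := len(states) - 1; i >= 0; i-- {
		switch states[i] {
		case running:
			return -1, false // in progress: nothing to start yet
		case succeeded:
			if i == len(states)-1 {
				return -1, true // the last init container finished
			}
			return i + 1, false // advance to the next container
		}
	}
	return 0, false // nothing has run: start from the beginning
}

func main() {
	fmt.Println(nextInit([]initState{succeeded, noStatus}))  // 1 false
	fmt.Println(nextInit([]initState{succeeded, running}))   // -1 false
	fmt.Println(nextInit([]initState{succeeded, succeeded})) // -1 true
	fmt.Println(nextInit([]initState{failed, running}))      // 0 false
}

Scanning for failures from the end first means the most recent failure is retried before any other progress is considered.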
// AttachContainer attaches to the container's console
func (m *kubeGenericRuntimeManager) AttachContainer(id kubecontainer.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) (err error) {
return fmt.Errorf("not implemented")

View File

@@ -42,7 +42,7 @@ func makeAndSetFakeEvictablePod(m *kubeGenericRuntimeManager, fakeRuntime *apite
return err
}
-fakeContainers, err := makeFakeContainers(m, pod.apiPod, pod.apiPod.Spec.Containers, pod.createdAt)
+fakeContainers, err := makeFakeContainers(m, pod.apiPod, pod.apiPod.Spec.Containers, pod.createdAt, runtimeApi.ContainerState_EXITED)
if err != nil {
return err
}

View File

@@ -343,6 +343,13 @@ type podContainerSpecChanges struct {
// the key is the container ID of the container, while
// the value contains necessary information to kill a container.
ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo
// InitFailed indicates whether init containers are failed.
InitFailed bool
// InitContainersToKeep keeps a map of init containers that need to be kept
// as-is; the key is the container ID of the container, and
// the value is the index of the container inside pod.Spec.InitContainers.
InitContainersToKeep map[kubecontainer.ContainerID]int
}
// podSandboxChanged checks whether the spec of the pod is changed and returns
@@ -374,20 +381,61 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *api.Pod, podStatus *k
return false, sandboxStatus.Metadata.GetAttempt(), sandboxStatus.GetId()
}
// checkAndKeepInitContainers keeps all running and successfully completed init containers.
// If there are failing containers, only the first failing one is kept, and true is returned.
func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool {
initFailed := false
for i, container := range pod.Spec.InitContainers {
containerStatus := podStatus.FindContainerStatusByName(container.Name)
if containerStatus == nil {
continue
}
if containerStatus.State == kubecontainer.ContainerStateRunning {
initContainersToKeep[containerStatus.ID] = i
continue
}
if containerStatus.State == kubecontainer.ContainerStateExited {
initContainersToKeep[containerStatus.ID] = i
}
if isContainerFailed(containerStatus) {
initFailed = true
break
}
}
return initFailed
}
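To make the bookkeeping concrete, here is a self-contained sketch of the same scan over hypothetical per-container states ordered like pod.Spec.InitContainers (keyed by index instead of container ID for brevity). Note that a failed container is still added to the keep map before the loop stops, which is what preserves the first failing attempt for inspection:

package main

import "fmt"

type keepState int

const (
	missing keepState = iota // no status recorded for this container
	stillRunning
	exitedZero
	exitedNonZero
)

// keepInit mirrors checkAndKeepInitContainers: running and exited init
// containers are recorded, and the scan stops at the first failure,
// after recording it.
func keepInit(states []keepState) (keep map[int]bool, initFailed bool) {
	keep = map[int]bool{}
	for i, s := range states {
		if s == missing {
			continue
		}
		keep[i] = true // running, succeeded, or the first failure
		if s == exitedNonZero {
			return keep, true // first failure: stop, but keep it
		}
	}
	return keep, false
}

func main() {
	keep, failed := keepInit([]keepState{exitedZero, exitedNonZero, missing})
	fmt.Println(keep, failed) // map[0:true 1:true] true
}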
// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
changes := podContainerSpecChanges{
-CreateSandbox:     sandboxChanged,
-SandboxID:         sandboxID,
-Attempt:           attempt,
-ContainersToStart: make(map[int]string),
-ContainersToKeep:  make(map[kubecontainer.ContainerID]int),
-ContainersToKill:  make(map[kubecontainer.ContainerID]containerToKillInfo),
+CreateSandbox:        sandboxChanged,
+SandboxID:            sandboxID,
+Attempt:              attempt,
+ContainersToStart:    make(map[int]string),
+ContainersToKeep:     make(map[kubecontainer.ContainerID]int),
+InitContainersToKeep: make(map[kubecontainer.ContainerID]int),
+ContainersToKill:     make(map[kubecontainer.ContainerID]containerToKillInfo),
}
// check the status of init containers.
initFailed := false
// always reset the init containers if the sandbox is changed.
if !sandboxChanged {
// Keep all successfully completed containers. If there are failing containers,
// only keep the first failing one.
initFailed = checkAndKeepInitContainers(pod, podStatus, changes.InitContainersToKeep)
}
changes.InitFailed = initFailed
// check the status of containers.
for index, container := range pod.Spec.Containers {
if sandboxChanged {
message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
@@ -406,6 +454,18 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
continue
}
if initFailed {
// Initialization failed and Container exists.
// If we have an initialization failure everything will be killed anyway.
// If RestartPolicy is Always or OnFailure we restart containers that were running before.
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
glog.V(1).Info(message)
changes.ContainersToStart[index] = message
}
continue
}
expectedHash := kubecontainer.HashContainer(&container)
containerChanged := containerStatus.Hash != expectedHash
if containerChanged {
@@ -428,10 +488,17 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
}
}
-// compute containers that to be killed
-runningContainerStatues := podStatus.GetRunningContainerStatuses()
-for _, containerStatus := range runningContainerStatues {
-if _, keep := changes.ContainersToKeep[containerStatus.ID]; !keep {
+// Don't keep init containers if they are the only containers to keep.
+if !sandboxChanged && len(changes.ContainersToStart) == 0 && len(changes.ContainersToKeep) == 0 {
+changes.InitContainersToKeep = make(map[kubecontainer.ContainerID]int)
+}
+// compute containers to be killed
+runningContainerStatuses := podStatus.GetRunningContainerStatuses()
+for _, containerStatus := range runningContainerStatuses {
+_, keep := changes.ContainersToKeep[containerStatus.ID]
+_, keepInit := changes.InitContainersToKeep[containerStatus.ID]
+if !keep && !keepInit {
var podContainer *api.Container
var killMessage string
for i, c := range pod.Spec.Containers {
@@ -459,9 +526,8 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
// 2. Kill pod sandbox if necessary.
// 3. Kill any containers that should not be running.
// 4. Create sandbox if necessary.
-// 5. Create necessary containers
-//
-// TODO: support init containers in SyncPod.
+// 5. Create init containers.
+// 6. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
// Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodContainerChanges(pod, podStatus)
@@ -507,6 +573,9 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt
}
}
// Keep terminated init containers fairly aggressively controlled
m.pruneInitContainersBeforeStart(pod, podStatus, podContainerChanges.InitContainersToKeep)
// We pass the value of the podIP down to generatePodSandboxConfig and
// generateContainerConfig, which in turn passes it to various other
// functions, in order to facilitate functionality that requires this
@@ -585,7 +654,59 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt
return
}
-// Step 5: start containers in podContainerChanges.ContainersToStart.
+// Step 5: start init containers.
status, next, done := findNextInitContainerToRun(pod, podStatus)
if status != nil && status.ExitCode != 0 {
// container initialization has failed, flag the pod as failed
initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
result.AddSyncResult(initContainerResult)
if pod.Spec.RestartPolicy == api.RestartPolicyNever {
utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
return
}
utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %#v", format.Pod(pod), status.Name, status))
}
if next != nil {
if len(podContainerChanges.ContainersToStart) == 0 {
glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod))
return
}
// If we need to start the next container, do so now then exit
container := next
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
result.AddSyncResult(startContainerResult)
isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
if isInBackOff {
startContainerResult.Fail(err, msg)
glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
return
}
glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
startContainerResult.Fail(err, msg)
utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
return
}
// Successfully started the container; clear the entry in the failure
glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
return
}
if !done {
// init container still running
glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod))
return
}
if podContainerChanges.InitFailed {
glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod))
return
}
// Step 6: start containers in podContainerChanges.ContainersToStart.
for idx := range podContainerChanges.ContainersToStart {
container := &pod.Spec.Containers[idx]
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
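Taken together, the init handling gives SyncPod a gate: at most one init container is started per sync pass, and the regular containers in ContainersToStart only run once every init container has succeeded. A toy model of that ordering (all names and state here are hypothetical stand-ins for kubecontainer.PodStatus, not the real implementation):

package main

import "fmt"

// A toy model of the ordering SyncPod now enforces.
type toyPod struct {
	initDone []bool   // true once the matching init container succeeded
	started  []string // record of startContainer calls, in order
}

func syncPod(p *toyPod, initNames, appNames []string) {
	// Step 5: start the next init container, if any, then return.
	for i, done := range p.initDone {
		if !done {
			p.started = append(p.started, initNames[i])
			p.initDone[i] = true // assume success for the demo
			return               // one init container per pass
		}
	}
	// Step 6: all init containers succeeded; start regular containers.
	p.started = append(p.started, appNames...)
}

func main() {
	p := &toyPod{initDone: make([]bool, 2)}
	inits := []string{"init1", "init2"}
	apps := []string{"foo1", "foo2"}
	for i := 0; i < 3; i++ { // three sync passes
		syncPod(p, inits, apps)
	}
	fmt.Println(p.started) // [init1 init2 foo1 foo2]
}

The immediate return after m.startContainer in the real code is what makes this single-stepping work: the kubelet's sync loop calls SyncPod again, findNextInitContainerToRun observes the freshly exited init container, and the next pass advances one step further.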

View File

@@ -61,11 +61,17 @@ func makeAndSetFakePod(m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRu
return nil, nil, err
}
-fakeContainers, err := makeFakeContainers(m, pod, pod.Spec.Containers, fakeCreatedAt)
+fakeContainers, err := makeFakeContainers(m, pod, pod.Spec.Containers, fakeCreatedAt, runtimeApi.ContainerState_RUNNING)
if err != nil {
return nil, nil, err
}
fakeInitContainers, err := makeFakeContainers(m, pod, pod.Spec.InitContainers, fakeCreatedAt, runtimeApi.ContainerState_EXITED)
if err != nil {
return nil, nil, err
}
fakeContainers = append(fakeContainers, fakeInitContainers...)
fakeRuntime.SetFakeSandboxes([]*apitest.FakePodSandbox{fakePodSandbox})
fakeRuntime.SetFakeContainers(fakeContainers)
return fakePodSandbox, fakeContainers, nil
@@ -90,7 +96,7 @@ func makeFakePodSandbox(m *kubeGenericRuntimeManager, pod *api.Pod, createdAt in
}, nil
}
-func makeFakeContainer(m *kubeGenericRuntimeManager, pod *api.Pod, container api.Container, sandboxConfig *runtimeApi.PodSandboxConfig, createdAt int64) (*apitest.FakeContainer, error) {
+func makeFakeContainer(m *kubeGenericRuntimeManager, pod *api.Pod, container api.Container, sandboxConfig *runtimeApi.PodSandboxConfig, createdAt int64, state runtimeApi.ContainerState) (*apitest.FakeContainer, error) {
containerConfig, err := m.generateContainerConfig(&container, pod, 0, "")
if err != nil {
return nil, err
@@ -98,7 +104,6 @@ func makeFakeContainer(m *kubeGenericRuntimeManager, pod *api.Pod, container api
containerID := apitest.BuildContainerName(containerConfig.Metadata)
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
-runningState := runtimeApi.ContainerState_RUNNING
imageRef := containerConfig.Image.GetImage()
return &apitest.FakeContainer{
ContainerStatus: runtimeApi.ContainerStatus{
@@ -107,7 +112,7 @@ func makeFakeContainer(m *kubeGenericRuntimeManager, pod *api.Pod, container api
Image: containerConfig.Image,
ImageRef: &imageRef,
CreatedAt: &createdAt,
-State: &runningState,
+State: &state,
Labels: containerConfig.Labels,
Annotations: containerConfig.Annotations,
},
@@ -115,7 +120,7 @@ func makeFakeContainer(m *kubeGenericRuntimeManager, pod *api.Pod, container api
}, nil
}
-func makeFakeContainers(m *kubeGenericRuntimeManager, pod *api.Pod, containers []api.Container, createdAt int64) ([]*apitest.FakeContainer, error) {
+func makeFakeContainers(m *kubeGenericRuntimeManager, pod *api.Pod, containers []api.Container, createdAt int64, state runtimeApi.ContainerState) ([]*apitest.FakeContainer, error) {
sandboxConfig, err := m.generatePodSandboxConfig(pod, 0)
if err != nil {
return nil, err
@@ -123,7 +128,7 @@ func makeFakeContainers(m *kubeGenericRuntimeManager, pod *api.Pod, containers [
result := make([]*apitest.FakeContainer, len(containers))
for idx, c := range containers {
-containerWithState, err := makeFakeContainer(m, pod, c, sandboxConfig, createdAt)
+containerWithState, err := makeFakeContainer(m, pod, c, sandboxConfig, createdAt, state)
if err != nil {
return nil, err
}
@@ -155,6 +160,17 @@ func verifyPods(a, b []*kubecontainer.Pod) bool {
return reflect.DeepEqual(a, b)
}
func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected []string) ([]string, bool) {
actual := []string{}
for _, c := range fakeRuntime.Containers {
actual = append(actual, c.GetId())
}
sort.Sort(sort.StringSlice(actual))
sort.Sort(sort.StringSlice(expected))
return actual, reflect.DeepEqual(expected, actual)
}
func TestNewKubeRuntimeManager(t *testing.T) {
_, _, _, err := createTestRuntimeManager()
assert.NoError(t, err)
@@ -249,12 +265,13 @@ func TestGetPods(t *testing.T) {
for i := range containers {
fakeContainer := fakeContainers[i]
c, err := m.toKubeContainer(&runtimeApi.Container{
-Id:       fakeContainer.Id,
-Metadata: fakeContainer.Metadata,
-State:    fakeContainer.State,
-Image:    fakeContainer.Image,
-ImageRef: fakeContainer.ImageRef,
-Labels:   fakeContainer.Labels,
+Id:          fakeContainer.Id,
+Metadata:    fakeContainer.Metadata,
+State:       fakeContainer.State,
+Image:       fakeContainer.Image,
+ImageRef:    fakeContainer.ImageRef,
+Labels:      fakeContainer.Labels,
+Annotations: fakeContainer.Annotations,
})
if err != nil {
t.Fatalf("unexpected error %v", err)
@@ -263,11 +280,12 @@ func TestGetPods(t *testing.T) {
}
// Convert fakeSandbox to kubecontainer.Container
sandbox, err := m.sandboxToKubeContainer(&runtimeApi.PodSandbox{
-Id:        fakeSandbox.Id,
-Metadata:  fakeSandbox.Metadata,
-State:     fakeSandbox.State,
-CreatedAt: fakeSandbox.CreatedAt,
-Labels:    fakeSandbox.Labels,
+Id:          fakeSandbox.Id,
+Metadata:    fakeSandbox.Metadata,
+State:       fakeSandbox.State,
+CreatedAt:   fakeSandbox.CreatedAt,
+Labels:      fakeSandbox.Labels,
+Annotations: fakeSandbox.Annotations,
})
if err != nil {
t.Fatalf("unexpected error %v", err)
@@ -287,7 +305,7 @@ func TestGetPods(t *testing.T) {
assert.NoError(t, err)
if !verifyPods(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
t.Errorf("expected %q, got %q", expected, actual)
}
}
@@ -484,3 +502,99 @@ func TestSyncPod(t *testing.T) {
assert.Equal(t, runtimeApi.ContainerState_RUNNING, c.GetState())
}
}
func TestPruneInitContainers(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
InitContainers: []api.Container{
{
Name: "init1",
Image: "busybox",
},
{
Name: "init2",
Image: "busybox",
},
},
},
}
// Set fake sandbox and fake containers to fakeRuntime.
_, _, err = makeAndSetFakePod(m, fakeRuntime, pod)
assert.NoError(t, err)
status := &kubecontainer.PodStatus{
ContainerStatuses: []*kubecontainer.ContainerStatus{
{Name: "init2", ID: kubecontainer.ContainerID{ID: "init2_0"}, State: kubecontainer.ContainerStateExited},
{Name: "init1", ID: kubecontainer.ContainerID{ID: "init1_0"}, State: kubecontainer.ContainerStateExited},
{Name: "init1", ID: kubecontainer.ContainerID{ID: "init1_1"}, State: kubecontainer.ContainerStateExited},
{Name: "init1", ID: kubecontainer.ContainerID{ID: "init1_2"}, State: kubecontainer.ContainerStateExited},
{Name: "init2", ID: kubecontainer.ContainerID{ID: "init2_1"}, State: kubecontainer.ContainerStateExited},
},
}
keep := map[kubecontainer.ContainerID]int{}
m.pruneInitContainersBeforeStart(pod, status, keep)
expectedContainers := []string{"init1_0", "init2_0"}
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
t.Errorf("expected %q, got %q", expectedContainers, actual)
}
}
func TestSyncPodWithInitContainers(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
initContainers := []api.Container{
{
Name: "init1",
Image: "init",
ImagePullPolicy: api.PullIfNotPresent,
},
}
containers := []api.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: api.PullIfNotPresent,
},
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: containers,
InitContainers: initContainers,
},
}
_, _, err = makeAndSetFakePod(m, fakeRuntime, pod)
assert.NoError(t, err)
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result := m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 3, len(fakeRuntime.Containers))
expectedContainers := []string{"foo1_0", "foo2_0", "init1_0"}
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
t.Errorf("expected %q, got %q", expectedContainers, actual)
}
}