Merge pull request #49599 from tcharding/kubelet_test_mock

Automatic merge from submit-queue (batch tested with PRs 51391, 51338, 51340, 50773, 49599)

Remove duplicate code

This PR cleans up kubelet test code. It adds a `chainMock` function that lets tests drop duplicated mock-chaining setup, and a `checkPodStatus` helper that lets them drop duplicated pod status assertions.
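
For a quick sense of the shape of the change, here is a minimal, self-contained sketch of both helpers using testify's `mock` and `require` packages. `FakeCadvisor`, `FsInfo`, and `requirePhase` are hypothetical stand-ins for the real fixtures (`fakeCadvisor`, `cadvisorapiv2.FsInfo`, and `checkPodStatus`); the actual implementations are in the diff below.

```go
package kubelet_sketch

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// FsInfo is a stand-in for cadvisorapiv2.FsInfo.
type FsInfo struct{}

// FakeCadvisor is a stand-in for the fake cAdvisor used by the kubelet tests.
type FakeCadvisor struct {
	mock.Mock
}

func (f *FakeCadvisor) ImagesFsInfo() (FsInfo, error) {
	args := f.Called()
	return args.Get(0).(FsInfo), args.Error(1)
}

func (f *FakeCadvisor) RootFsInfo() (FsInfo, error) {
	args := f.Called()
	return args.Get(0).(FsInfo), args.Error(1)
}

// chainMock registers the expectations most tests share, replacing the
// .On(...).Return(...) lines previously duplicated across each test.
func (f *FakeCadvisor) chainMock() {
	f.On("ImagesFsInfo").Return(FsInfo{}, nil)
	f.On("RootFsInfo").Return(FsInfo{}, nil)
}

// requirePhase mirrors the checkPodStatus helper: one call replaces the
// duplicated lookup-and-assert block for a pod's phase.
func requirePhase(t *testing.T, phases map[string]string, uid, want string) {
	got, found := phases[uid]
	require.True(t, found, "Status of pod %q is not found in the status map", uid)
	require.Equal(t, want, got)
}

func TestDeduplicationSketch(t *testing.T) {
	fake := &FakeCadvisor{}
	fake.chainMock() // one line instead of repeated mock setup

	_, err := fake.ImagesFsInfo()
	require.NoError(t, err)

	phases := map[string]string{"12345678": "Pending"}
	requirePhase(t, phases, "12345678", "Pending") // one line instead of three
}
```

In the real change, `chainMock` is a method on `TestKubelet`, and `checkPodStatus` reads the pod's phase from the kubelet's `statusManager`, taking the `*Kubelet`, the `*v1.Pod`, and the expected `v1.PodPhase`.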

Fixes #22470

**Special notes for your reviewer**:

This is my first PR for the Kubernetes project. Keeping it simple.
Kubernetes Submit Queue, 2017-08-26 08:49:29 -07:00, committed by GitHub
commit 9188043c6e
1 changed file with 53 additions and 163 deletions


@@ -115,6 +115,11 @@ func (tk *TestKubelet) Cleanup() {
}
}
func (tk *TestKubelet) chainMock() {
tk.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
tk.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
}
// newTestKubelet returns test kubelet with two images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
imageList := []kubecontainer.Image{
@@ -323,7 +328,6 @@ var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
func TestSyncLoopAbort(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.runtimeState.setRuntimeSync(time.Now())
// The syncLoop waits on time.After(resyncInterval), set it really big so that we don't race for
@@ -344,10 +348,6 @@ func TestSyncLoopAbort(t *testing.T) {
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
pods := []*v1.Pod{
@@ -368,9 +368,6 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })
@@ -416,14 +413,18 @@ func (ls testNodeLister) List(selector labels.Selector) ([]*v1.Node, error) {
return ls.nodes, nil
}
func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
status, found := kl.statusManager.GetPodStatus(pod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
require.Equal(t, phase, status.Phase)
}
// Tests that we handle port conflicts correctly by setting the failed status in status map.
func TestHandlePortConflicts(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.chainMock()
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
{
@@ -449,26 +450,18 @@ func TestHandlePortConflicts(t *testing.T) {
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
require.Equal(t, v1.PodFailed, status.Phase)
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
require.Equal(t, v1.PodPending, status.Phase)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that we handle host name conflicts correctly by setting the failed status in status map.
func TestHandleHostNameConflicts(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.chainMock()
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
{
@@ -491,22 +484,17 @@ func TestHandleHostNameConflicts(t *testing.T) {
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
require.Equal(t, v1.PodFailed, status.Phase)
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
require.Equal(t, v1.PodPending, status.Phase)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that we handle not matching labels selector correctly by setting the failed status in status map.
func TestHandleNodeSelector(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.chainMock()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{
@@ -519,9 +507,6 @@ func TestHandleNodeSelector(t *testing.T) {
},
}
kl.nodeInfo = testNodeInfo{nodes: nodes}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
@@ -531,22 +516,17 @@ func TestHandleNodeSelector(t *testing.T) {
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
require.Equal(t, v1.PodFailed, status.Phase)
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
require.Equal(t, v1.PodPending, status.Phase)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.chainMock()
kl := testKubelet.kubelet
nodes := []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -557,9 +537,6 @@ func TestHandleMemExceeded(t *testing.T) {
}}},
}
kl.nodeInfo = testNodeInfo{nodes: nodes}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
spec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
@@ -580,25 +557,17 @@ func TestHandleMemExceeded(t *testing.T) {
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
require.Equal(t, v1.PodFailed, status.Phase)
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
require.Equal(t, v1.PodPending, status.Phase)
// Check pod status stored in the status map.
checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
checkPodStatus(t, kl, fittingPod, v1.PodPending)
}
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
@@ -736,11 +705,7 @@ func TestCreateMirrorPod(t *testing.T) {
for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
@@ -763,11 +728,7 @@ func TestCreateMirrorPod(t *testing.T) {
func TestDeleteOutdatedMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
@@ -806,10 +767,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
func TestDeleteOrphanedMirrorPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
@@ -929,11 +887,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) {
func TestHostNetworkAllowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -962,11 +916,7 @@ func TestHostNetworkAllowed(t *testing.T) {
func TestHostNetworkDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -994,11 +944,7 @@ func TestHostNetworkDisallowed(t *testing.T) {
func TestHostPIDAllowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -1027,11 +973,7 @@ func TestHostPIDAllowed(t *testing.T) {
func TestHostPIDDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -1059,11 +1001,7 @@ func TestHostPIDDisallowed(t *testing.T) {
func TestHostIPCAllowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -1092,11 +1030,7 @@ func TestHostIPCAllowed(t *testing.T) {
func TestHostIPCDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -1124,11 +1058,7 @@ func TestHostIPCDisallowed(t *testing.T) {
func TestPrivilegeContainerAllowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -1154,10 +1084,7 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
func TestPrivilegedContainerDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
@@ -1181,10 +1108,7 @@ func TestPrivilegedContainerDisallowed(t *testing.T) {
func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))
@@ -1299,11 +1223,7 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
@@ -1368,11 +1288,7 @@ func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec
func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kl := testKubelet.kubelet
pods := []*v1.Pod{
podWithUIDNameNs("12345678", "pod1", "ns"),
@@ -1408,11 +1324,7 @@ func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod,
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kl := testKubelet.kubelet
pods := []*v1.Pod{
podWithUIDNameNs("12345678", "pod1", "ns"),
@@ -1431,11 +1343,7 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
runningPod := &kubecontainer.Pod{
ID: "12345678",
Name: "pod1",
@@ -1495,10 +1403,7 @@ func TestGetPodsToSync(t *testing.T) {
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
numContainers := 10
expectedOrder := []string{}
@@ -1558,10 +1463,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
pod := podWithUIDNameNs("12345678", "foo", "new")
pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
@@ -1748,10 +1650,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.chainMock()
kubelet := testKubelet.kubelet
pod := podWithUIDNameNs("12345678", "foo", "new")
containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
@@ -1915,6 +1814,7 @@ func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecyc
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
testKubelet.chainMock()
kl := testKubelet.kubelet
kl.nodeInfo = testNodeInfo{nodes: []*v1.Node{
{
@@ -1926,9 +1826,6 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
},
},
}}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*v1.Pod{
{
@@ -1953,16 +1850,10 @@ func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
kl.admitHandlers.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// podToReject should be Failed
status, found := kl.statusManager.GetPodStatus(podToReject.UID)
require.True(t, found, "Status of pod %q is not found in the status map", podToAdmit.UID)
require.Equal(t, v1.PodFailed, status.Phase)
// podToAdmit should be Pending
status, found = kl.statusManager.GetPodStatus(podToAdmit.UID)
require.True(t, found, "Status of pod %q is not found in the status map", podToAdmit.UID)
require.Equal(t, v1.PodPending, status.Phase)
// Check pod status stored in the status map.
checkPodStatus(t, kl, podToReject, v1.PodFailed)
checkPodStatus(t, kl, podToAdmit, v1.PodPending)
}
// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
@@ -2066,10 +1957,9 @@ func TestSyncPodKillPod(t *testing.T) {
},
})
require.NoError(t, err)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(pod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
require.Equal(t, v1.PodFailed, status.Phase)
checkPodStatus(t, kl, pod, v1.PodFailed)
}
func waitForVolumeUnmount(