Merge pull request #24846 from pmorie/kubelet-test-loc

Reduce LOC in kubelet tests
Saad Ali 2016-05-12 15:52:27 -07:00
commit 25f37007aa
3 changed files with 198 additions and 467 deletions
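The diff below applies one pattern throughout: verbose api.Pod composite literals in the kubelet tests are replaced by small constructor helpers (podWithUidNameNs, podWithUidNameNsSpec, plus local closures such as portMapping, portBinding, and port), which is where most of the 467 deleted lines come from. The following sketch illustrates the shape of that refactor; it is not part of the commit and uses simplified stand-in types rather than the real k8s.io/kubernetes/pkg/api and pkg/types packages, so only the helper signatures are taken from the diff.

package main

import "fmt"

// Stand-in types; the real tests use api.Pod, api.ObjectMeta, api.PodSpec
// and types.UID from the Kubernetes tree.
type UID string

type ObjectMeta struct {
	UID         UID
	Name        string
	Namespace   string
	Annotations map[string]string
}

type PodSpec struct{ NodeName string }

type Pod struct {
	ObjectMeta ObjectMeta
	Spec       PodSpec
}

// podWithUidNameNs mirrors the helper this PR adds: build a pod carrying only
// the metadata the kubelet tests care about, with a non-nil Annotations map.
func podWithUidNameNs(uid UID, name, namespace string) *Pod {
	return &Pod{
		ObjectMeta: ObjectMeta{
			UID:         uid,
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
	}
}

// podWithUidNameNsSpec layers a spec on top of podWithUidNameNs.
func podWithUidNameNsSpec(uid UID, name, namespace string, spec PodSpec) *Pod {
	pod := podWithUidNameNs(uid, name, namespace)
	pod.Spec = spec
	return pod
}

func main() {
	// Before: each test pod is a multi-line composite literal.
	verbose := &Pod{
		ObjectMeta: ObjectMeta{
			UID:       "123456789",
			Name:      "newpod",
			Namespace: "foo",
		},
		Spec: PodSpec{NodeName: "127.0.0.1"},
	}

	// After: one call per pod, keeping only the interesting fields visible.
	concise := podWithUidNameNsSpec("123456789", "newpod", "foo", PodSpec{NodeName: "127.0.0.1"})

	fmt.Println(verbose.ObjectMeta, concise.ObjectMeta)
}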


@@ -674,37 +674,29 @@ func TestFindContainersByPod(t *testing.T) {
}
func TestMakePortsAndBindings(t *testing.T) {
portMapping := func(container, host int, protocol api.Protocol, ip string) kubecontainer.PortMapping {
return kubecontainer.PortMapping{
ContainerPort: container,
HostPort: host,
Protocol: protocol,
HostIP: ip,
}
}
portBinding := func(port, ip string) dockernat.PortBinding {
return dockernat.PortBinding{
HostPort: port,
HostIP: ip,
}
}
ports := []kubecontainer.PortMapping{
{
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
ContainerPort: 443,
HostPort: 443,
Protocol: "tcp",
},
{
ContainerPort: 444,
HostPort: 444,
Protocol: "udp",
},
{
ContainerPort: 445,
HostPort: 445,
Protocol: "foobar",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "tcp",
},
{
ContainerPort: 443,
HostPort: 446,
Protocol: "udp",
},
portMapping(80, 8080, "", "127.0.0.1"),
portMapping(443, 443, "tcp", ""),
portMapping(444, 444, "udp", ""),
portMapping(445, 445, "foobar", ""),
portMapping(443, 446, "tcp", ""),
portMapping(443, 446, "udp", ""),
}
exposedPorts, bindings := makePortsAndBindings(ports)
@@ -725,38 +717,20 @@ func TestMakePortsAndBindings(t *testing.T) {
// Construct expected bindings
expectPortBindings := map[string][]dockernat.PortBinding{
"80/tcp": {
dockernat.PortBinding{
HostPort: "8080",
HostIP: "127.0.0.1",
},
portBinding("8080", "127.0.0.1"),
},
"443/tcp": {
dockernat.PortBinding{
HostPort: "443",
HostIP: "",
},
dockernat.PortBinding{
HostPort: "446",
HostIP: "",
},
portBinding("443", ""),
portBinding("446", ""),
},
"443/udp": {
dockernat.PortBinding{
HostPort: "446",
HostIP: "",
},
portBinding("446", ""),
},
"444/udp": {
dockernat.PortBinding{
HostPort: "444",
HostIP: "",
},
portBinding("444", ""),
},
"445/tcp": {
dockernat.PortBinding{
HostPort: "445",
HostIP: "",
},
portBinding("445", ""),
},
}


@@ -284,18 +284,11 @@ func TestSyncPodsStartPod(t *testing.T) {
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
},
}),
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodSyncs(pods)
@@ -315,8 +308,9 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "foo", Namespace: "new",
ID: "12345678",
Name: "foo",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
@@ -339,22 +333,16 @@ func TestMountExternalVolumes(t *testing.T) {
plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet})
pod := api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "test",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{},
},
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{},
},
},
}
podVolumes, err := kubelet.mountExternalVolumes(&pod)
})
podVolumes, err := kubelet.mountExternalVolumes(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
@@ -473,30 +461,23 @@ func TestCleanupOrphanedVolumes(t *testing.T) {
}}).ReactionChain
// Create a pod referencing the volume via a PersistentVolumeClaim
pod := api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "podUID",
Name: "pod",
Namespace: "test",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "myvolumeclaim",
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: "myclaim",
},
pod := podWithUidNameNsSpec("podUID", "pod", "test", api.PodSpec{
Volumes: []api.Volume{
{
Name: "myvolumeclaim",
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: "myclaim",
},
},
},
},
}
})
// The pod is pending and not running yet. Test that cleanupOrphanedVolumes
// won't remove the volume from disk if the volume is referenced only
// indirectly by a claim.
err := kubelet.cleanupOrphanedVolumes([]*api.Pod{&pod}, []*kubecontainer.Pod{})
err := kubelet.cleanupOrphanedVolumes([]*api.Pod{pod}, []*kubecontainer.Pod{})
if err != nil {
t.Errorf("cleanupOrphanedVolumes failed: %v", err)
}
@@ -1836,11 +1817,7 @@ func TestExecInContainer(t *testing.T) {
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)),
"",
containerID,
[]string{"ls"},
@@ -2002,22 +1979,8 @@ func TestHandlePortConflicts(t *testing.T) {
spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "newpod",
Namespace: "foo",
},
Spec: spec,
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "oldpod",
Namespace: "foo",
},
Spec: spec,
},
podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
@@ -2075,29 +2038,10 @@ func TestHandleHostNameConflicts(t *testing.T) {
},
}}
// default NodeName in test is 127.0.0.1
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "notfittingpod",
Namespace: "foo",
},
Spec: api.PodSpec{
// default NodeName in test is 127.0.0.1
NodeName: "127.0.0.2",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "fittingpod",
Namespace: "foo",
},
Spec: api.PodSpec{
// default NodeName in test is 127.0.0.1
NodeName: "127.0.0.1",
},
},
podWithUidNameNsSpec("123456789", "notfittingpod", "foo", api.PodSpec{NodeName: "127.0.0.2"}),
podWithUidNameNsSpec("987654321", "fittingpod", "foo", api.PodSpec{NodeName: "127.0.0.1"}),
}
notfittingPod := pods[0]
@@ -2143,22 +2087,8 @@ func TestHandleNodeSelector(t *testing.T) {
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "podA",
Namespace: "foo",
},
Spec: api.PodSpec{NodeSelector: map[string]string{"key": "A"}},
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "podB",
Namespace: "foo",
},
Spec: api.PodSpec{NodeSelector: map[string]string{"key": "B"}},
},
podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
podWithUidNameNsSpec("987654321", "podB", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
}
// The first pod should be rejected.
notfittingPod := pods[0]
@@ -2209,22 +2139,8 @@ func TestHandleMemExceeded(t *testing.T) {
},
}}}}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "newpod",
Namespace: "foo",
},
Spec: spec,
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "oldpod",
Namespace: "foo",
},
Spec: spec,
},
podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
@@ -3104,16 +3020,8 @@ func TestCreateMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "foo",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
}
pod := podWithUidNameNs("12345678", "bar", "foo")
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
pods := []*api.Pod{pod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, nil, &kubecontainer.PodStatus{}, updateType)
@@ -3139,38 +3047,21 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
pod := podWithUidNameNsSpec("12345678", "foo", "ns", api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "foo"},
},
},
}
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
// Mirror pod has an outdated spec.
mirrorPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "11111111",
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "bar"},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "bar"},
},
},
}
})
mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
pods := []*api.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
@@ -3321,24 +3212,16 @@ func TestHostNetworkAllowed(t *testing.T) {
HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
},
})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
}
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, &kubecontainer.PodStatus{}, kubetypes.SyncPodUpdate)
if err != nil {
@@ -3355,24 +3238,16 @@ func TestHostNetworkDisallowed(t *testing.T) {
HostNetworkSources: []string{},
},
})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
}
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
err := kubelet.syncPod(pod, nil, &kubecontainer.PodStatus{}, kubetypes.SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
@@ -3387,18 +3262,12 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
AllowPrivileged: true,
})
privileged := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
},
}
})
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, &kubecontainer.PodStatus{}, kubetypes.SyncPodUpdate)
if err != nil {
@@ -3414,18 +3283,12 @@ func TestPrivilegeContainerDisallowed(t *testing.T) {
AllowPrivileged: false,
})
privileged := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
},
}
})
err := kubelet.syncPod(pod, nil, &kubecontainer.PodStatus{}, kubetypes.SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
@@ -3508,6 +3371,25 @@ func TestRegisterExistingNodeWithApiserver(t *testing.T) {
}
func TestMakePortMappings(t *testing.T) {
port := func(name string, protocol api.Protocol, containerPort, hostPort int32, ip string) api.ContainerPort {
return api.ContainerPort{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
HostIP: ip,
}
}
portMapping := func(name string, protocol api.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping {
return kubecontainer.PortMapping{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
HostIP: ip,
}
}
tests := []struct {
container *api.Container
expectedPortMappings []kubecontainer.PortMapping
@@ -3516,59 +3398,19 @@ func TestMakePortMappings(t *testing.T) {
&api.Container{
Name: "fooContainer",
Ports: []api.ContainerPort{
{
Protocol: api.ProtocolTCP,
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
Protocol: api.ProtocolTCP,
ContainerPort: 443,
HostPort: 4343,
HostIP: "192.168.0.1",
},
{
Name: "foo",
Protocol: api.ProtocolUDP,
ContainerPort: 555,
HostPort: 5555,
},
{
Name: "foo", // Duplicated, should be ignored.
Protocol: api.ProtocolUDP,
ContainerPort: 888,
HostPort: 8888,
},
{
Protocol: api.ProtocolTCP, // Duplicated, should be ignored.
ContainerPort: 80,
HostPort: 8888,
},
port("", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
port("", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
port("foo", api.ProtocolUDP, 555, 5555, ""),
// Duplicated, should be ignored.
port("foo", api.ProtocolUDP, 888, 8888, ""),
// Duplicated, should be ignored.
port("", api.ProtocolTCP, 80, 8888, ""),
},
},
[]kubecontainer.PortMapping{
{
Name: "fooContainer-TCP:80",
Protocol: api.ProtocolTCP,
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
Name: "fooContainer-TCP:443",
Protocol: api.ProtocolTCP,
ContainerPort: 443,
HostPort: 4343,
HostIP: "192.168.0.1",
},
{
Name: "fooContainer-foo",
Protocol: api.ProtocolUDP,
ContainerPort: 555,
HostPort: 5555,
HostIP: "",
},
portMapping("fooContainer-TCP:80", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
portMapping("fooContainer-TCP:443", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
portMapping("fooContainer-foo", api.ProtocolUDP, 555, 5555, ""),
},
},
}
@@ -3710,6 +3552,23 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
}
}
func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: uid,
Name: name,
Namespace: namespace,
Annotations: map[string]string{},
},
}
}
func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod {
pod := podWithUidNameNs(uid, name, namespace)
pod.Spec = spec
return pod
}
func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
@@ -3717,20 +3576,8 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
},
},
podWithUidNameNs("12345678", "pod1", "ns"),
podWithUidNameNs("12345679", "pod2", "ns"),
}
kl.podManager.SetPods(pods)
@@ -3776,27 +3623,9 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345680",
Name: "pod3",
Namespace: "ns",
},
},
podWithUidNameNs("12345678", "pod1", "ns"),
podWithUidNameNs("12345679", "pod2", "ns"),
podWithUidNameNs("12345680", "pod3", "ns"),
}
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
@@ -3817,13 +3646,8 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
Name: "pod1",
Namespace: "ns",
}
apiPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: runningPod.ID,
Name: runningPod.Name,
Namespace: runningPod.Namespace,
},
}
apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
// Sync once to create pod directory; confirm that the pod directory has
// already been created.
pods := []*api.Pod{apiPod}
@@ -3843,6 +3667,16 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
}
func TestCleanupBandwidthLimits(t *testing.T) {
testPod := func(name, ingress string) *api.Pod {
pod := podWithUidNameNs("", name, "")
if len(ingress) != 0 {
pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
}
return pod
}
// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
// layer status getter function and test that function instead.
tests := []struct {
@@ -3858,19 +3692,8 @@ func TestCleanupBandwidthLimits(t *testing.T) {
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
testPod("foo", "10M"),
testPod("bar", ""),
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
@@ -3882,19 +3705,8 @@ func TestCleanupBandwidthLimits(t *testing.T) {
Phase: api.PodFailed,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
testPod("foo", "10M"),
testPod("bar", ""),
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
@@ -3906,16 +3718,8 @@ func TestCleanupBandwidthLimits(t *testing.T) {
Phase: api.PodFailed,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
testPod("foo", ""),
testPod("bar", ""),
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
@@ -3948,6 +3752,18 @@ func TestExtractBandwidthResources(t *testing.T) {
four, _ := resource.ParseQuantity("4M")
ten, _ := resource.ParseQuantity("10M")
twenty, _ := resource.ParseQuantity("20M")
testPod := func(ingress, egress string) *api.Pod {
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Annotations: map[string]string{}}}
if len(ingress) != 0 {
pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
}
if len(egress) != 0 {
pod.Annotations["kubernetes.io/egress-bandwidth"] = egress
}
return pod
}
tests := []struct {
pod *api.Pod
expectedIngress *resource.Quantity
@@ -3958,45 +3774,20 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
pod: &api.Pod{},
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
pod: testPod("10M", ""),
expectedIngress: ten,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/egress-bandwidth": "10M",
},
},
},
pod: testPod("", "10M"),
expectedEgress: ten,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "4M",
"kubernetes.io/egress-bandwidth": "20M",
},
},
},
pod: testPod("4M", "20M"),
expectedIngress: four,
expectedEgress: twenty,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "foo",
},
},
},
pod: testPod("foo", ""),
expectError: true,
},
}
@@ -4089,16 +3880,11 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
}
specContainerList = append(specContainerList, api.Container{Name: containerName})
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: types.UID("uid1"),
Name: "foo",
Namespace: "test",
},
Spec: api.PodSpec{
Containers: specContainerList,
},
pod := podWithUidNameNs("uid1", "foo", "test")
pod.Spec = api.PodSpec{
Containers: specContainerList,
}
status := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
@@ -4135,14 +3921,9 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
}
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure},
}
pod := podWithUidNameNs("12345678", "foo", "new")
pod.Spec = api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure}
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,


@@ -229,36 +229,12 @@ func TestFakePodWorkers(t *testing.T) {
&api.Pod{},
},
{
&api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
},
&api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "fooMirror",
Namespace: "new",
},
},
podWithUidNameNs("12345678", "foo", "new"),
podWithUidNameNs("12345678", "fooMirror", "new"),
},
{
&api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "98765",
Name: "bar",
Namespace: "new",
},
},
&api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "98765",
Name: "barMirror",
Namespace: "new",
},
},
podWithUidNameNs("98765", "bar", "new"),
podWithUidNameNs("98765", "barMirror", "new"),
},
}