k3s/pkg/kubelet/kubelet_test.go
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"path"
"reflect"
"sort"
"strings"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/prober"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/bandwidth"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume"
_ "k8s.io/kubernetes/pkg/volume/host_path"
)
func init() {
util.ReallyCrash = true
}
const testKubeletHostname = "127.0.0.1"
type fakeHTTP struct {
url string
err error
}
func (f *fakeHTTP) Get(url string) (*http.Response, error) {
f.url = url
return nil, f.err
}
type TestKubelet struct {
kubelet *Kubelet
fakeRuntime *kubecontainer.FakeRuntime
fakeCadvisor *cadvisor.Mock
fakeKubeClient *testclient.Fake
fakeMirrorClient *kubepod.FakeMirrorClient
}
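// newTestKubelet constructs a Kubelet whose collaborators are all fakes or
// stubs (fake runtime, mock cAdvisor, fake API client, fake mirror client,
// stub container manager, and so on), rooted in a throwaway temp directory.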
func newTestKubelet(t *testing.T) *TestKubelet {
fakeRuntime := &kubecontainer.FakeRuntime{}
fakeRuntime.VersionInfo = "1.15"
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &testclient.Fake{}
kubelet := &Kubelet{}
kubelet.kubeClient = fakeKubeClient
kubelet.os = kubecontainer.FakeOS{}
kubelet.hostname = testKubeletHostname
kubelet.nodeName = testKubeletHostname
kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime, false, "" /* Pod CIDR */, func() error { return nil })
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
} else {
kubelet.rootDirectory = tempDir
}
if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
}
kubelet.sourcesReady = func(_ sets.String) bool { return true }
kubelet.masterServiceNamespace = api.NamespaceDefault
kubelet.serviceLister = testServiceLister{}
kubelet.nodeLister = testNodeLister{}
kubelet.nodeInfo = testNodeInfo{}
kubelet.recorder = fakeRecorder
if err := kubelet.setupDataDirs(); err != nil {
t.Fatalf("can't initialize kubelet data dirs: %v", err)
}
kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{}
mockCadvisor := &cadvisor.Mock{}
kubelet.cadvisor = mockCadvisor
fakeMirrorClient := kubepod.NewFakeMirrorClient()
kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient)
kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
kubelet.containerRefManager = kubecontainer.NewRefManager()
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
if err != nil {
t.Fatalf("can't initialize disk space manager: %v", err)
}
kubelet.diskSpaceManager = diskSpaceManager
kubelet.containerRuntime = fakeRuntime
kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerRuntime)
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: kubelet.syncPod,
runtimeCache: kubelet.runtimeCache,
t: t,
}
kubelet.probeManager = prober.FakeManager{}
kubelet.livenessManager = proberesults.NewManager()
kubelet.volumeManager = newVolumeManager()
kubelet.containerManager = cm.NewStubContainerManager()
fakeClock := &util.FakeClock{Time: time.Now()}
kubelet.backOff = util.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock
kubelet.podKillingCh = make(chan *kubecontainer.Pod, 20)
kubelet.resyncInterval = 10 * time.Second
kubelet.workQueue = queue.NewBasicWorkQueue()
// Relist period does not affect the tests.
kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour)
return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient}
}
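// newTestPods returns count host-network pods with unique UIDs and names.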
func newTestPods(count int) []*api.Pod {
pods := make([]*api.Pod, count)
for i := 0; i < count; i++ {
pods[i] = &api.Pod{
Spec: api.PodSpec{
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
ObjectMeta: api.ObjectMeta{
UID: types.UID(fmt.Sprintf("%d", 10000+i)),
Name: fmt.Sprintf("pod%d", i),
},
}
}
return pods
}
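// TestKubeletDirs verifies the directory layout the kubelet derives from its
// root directory.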
func TestKubeletDirs(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
root := kubelet.rootDirectory
var exp, got string
got = kubelet.getPodsDir()
exp = path.Join(root, "pods")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPluginsDir()
exp = path.Join(root, "plugins")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPluginDir("foobar")
exp = path.Join(root, "plugins/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("abc123")
exp = path.Join(root, "pods/abc123")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodVolumesDir("abc123")
exp = path.Join(root, "pods/abc123/volumes")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodVolumeDir("abc123", "plugin", "foobar")
exp = path.Join(root, "pods/abc123/volumes/plugin/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodPluginsDir("abc123")
exp = path.Join(root, "pods/abc123/plugins")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodPluginDir("abc123", "foobar")
exp = path.Join(root, "pods/abc123/plugins/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("abc123", "def456")
exp = path.Join(root, "pods/abc123/containers/def456")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
}
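// TestKubeletDirsCompat verifies that pod and container directory lookups fall
// back to the old flat layout when only an old-style directory exists on disk.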
func TestKubeletDirsCompat(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
root := kubelet.rootDirectory
if err := os.MkdirAll(root, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
var exp, got string
// Old-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// New-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// Both-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
got = kubelet.getPodDir("oldpod")
exp = path.Join(root, "oldpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("newpod")
exp = path.Join(root, "pods/newpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("bothpod")
exp = path.Join(root, "pods/bothpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("neitherpod")
exp = path.Join(root, "pods/neitherpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
root = kubelet.getPodDir("newpod")
// Old-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// New-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// Both-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
got = kubelet.getPodContainerDir("newpod", "oldctr")
exp = path.Join(root, "oldctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "newctr")
exp = path.Join(root, "containers/newctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "bothctr")
exp = path.Join(root, "containers/bothctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "neitherctr")
exp = path.Join(root, "containers/neitherctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
}
var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
func TestSyncLoopTimeUpdate(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
loopTime1 := kubelet.LatestLoopEntryTime()
if !loopTime1.IsZero() {
t.Errorf("Unexpected sync loop time: %s, expected 0", loopTime1)
}
// Start sync ticker.
syncCh := make(chan time.Time, 1)
housekeepingCh := make(chan time.Time, 1)
plegCh := make(chan *pleg.PodLifecycleEvent)
syncCh <- time.Now()
kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
loopTime2 := kubelet.LatestLoopEntryTime()
if loopTime2.IsZero() {
t.Errorf("Unexpected sync loop time: 0, expected non-zero value.")
}
syncCh <- time.Now()
kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
loopTime3 := kubelet.LatestLoopEntryTime()
if !loopTime3.After(loopTime1) {
t.Errorf("Sync Loop Time was not updated correctly. Second update timestamp should be greater than first update timestamp")
}
}
func TestSyncLoopAbort(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.runtimeState.setRuntimeSync(time.Now())
// The syncLoop waits on time.After(resyncInterval); set it very large so that
// we don't race with the channel close below.
kubelet.resyncInterval = time.Second * 30
ch := make(chan kubetypes.PodUpdate)
close(ch)
// sanity check (also prevent this test from hanging in the next step)
ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
if ok {
t.Fatalf("expected syncLoopIteration to return !ok since update chan was closed")
}
// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
kubelet.syncLoop(ch, kubelet)
}
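// TestSyncPodsStartPod verifies that syncing a new pod asks the runtime to start it.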
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodSyncs(pods)
fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}
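// TestSyncPodsDeletesWhenSourcesAreReady verifies that unwanted running pods
// are killed only once all pod sources report ready.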
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
ready := false
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.sourcesReady = func(_ sets.String) bool { return ready }
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "foo", Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
},
}
kubelet.HandlePodCleanups()
// Sources are not ready yet. Don't remove any pods.
fakeRuntime.AssertKilledPods([]string{})
ready = true
kubelet.HandlePodCleanups()
// Sources are ready. Remove unwanted pods.
fakeRuntime.AssertKilledPods([]string{"12345678"})
}
func TestMountExternalVolumes(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{&volume.FakeVolumePlugin{PluginName: "fake", Host: nil}}, &volumeHost{kubelet})
pod := api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "test",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{},
},
},
},
}
podVolumes, err := kubelet.mountExternalVolumes(&pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
}
}
}
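// TestGetPodVolumesFromDisk verifies that volumes set up on disk are
// rediscovered as cleaners.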
func TestGetPodVolumesFromDisk(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
plug := &volume.FakeVolumePlugin{PluginName: "fake", Host: nil}
kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet})
volsOnDisk := []struct {
podUID types.UID
volName string
}{
{"pod1", "vol1"},
{"pod1", "vol2"},
{"pod2", "vol1"},
}
expectedPaths := []string{}
for i := range volsOnDisk {
fv := volume.FakeVolume{PodUID: volsOnDisk[i].podUID, VolName: volsOnDisk[i].volName, Plugin: plug}
fv.SetUp()
expectedPaths = append(expectedPaths, fv.GetPath())
}
volumesFound := kubelet.getPodVolumesFromDisk()
if len(volumesFound) != len(expectedPaths) {
t.Errorf("Expected to find %d cleaners, got %d", len(expectedPaths), len(volumesFound))
}
for _, ep := range expectedPaths {
found := false
for _, cl := range volumesFound {
if ep == cl.GetPath() {
found = true
break
}
}
if !found {
t.Errorf("Could not find a volume with path %s", ep)
}
}
}
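// stubVolume is a minimal volume implementation that only reports a fixed path.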
type stubVolume struct {
path string
}
func (f *stubVolume) GetPath() string {
return f.path
}
func (f *stubVolume) GetAttributes() volume.Attributes {
return volume.Attributes{}
}
func (f *stubVolume) SetUp() error {
return nil
}
func (f *stubVolume) SetUpAt(dir string) error {
return nil
}
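// TestMakeVolumeMounts verifies the translation of a container's VolumeMounts
// into runtime mounts, including the read-only flag.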
func TestMakeVolumeMounts(t *testing.T) {
container := api.Container{
VolumeMounts: []api.VolumeMount{
{
MountPath: "/etc/hosts",
Name: "disk",
ReadOnly: false,
},
{
MountPath: "/mnt/path3",
Name: "disk",
ReadOnly: true,
},
{
MountPath: "/mnt/path4",
Name: "disk4",
ReadOnly: false,
},
{
MountPath: "/mnt/path5",
Name: "disk5",
ReadOnly: false,
},
},
}
podVolumes := kubecontainer.VolumeMap{
"disk": kubecontainer.VolumeInfo{Builder: &stubVolume{"/mnt/disk"}},
"disk4": kubecontainer.VolumeInfo{Builder: &stubVolume{"/mnt/host"}},
"disk5": kubecontainer.VolumeInfo{Builder: &stubVolume{"/var/lib/kubelet/podID/volumes/empty/disk5"}},
}
pod := api.Pod{
Spec: api.PodSpec{
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
}
mounts, _ := makeMounts(&pod, "/pod", &container, podVolumes)
expectedMounts := []kubecontainer.Mount{
{
"disk",
"/etc/hosts",
"/mnt/disk",
false,
false,
},
{
"disk",
"/mnt/path3",
"/mnt/disk",
true,
false,
},
{
"disk4",
"/mnt/path4",
"/mnt/host",
false,
false,
},
{
"disk5",
"/mnt/path5",
"/var/lib/kubelet/podID/volumes/empty/disk5",
false,
false,
},
}
if !reflect.DeepEqual(mounts, expectedMounts) {
t.Errorf("Unexpected mounts: Expected %#v got %#v. Container was: %#v", expectedMounts, mounts, container)
}
}
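// TestGetContainerInfo verifies that stats for a container known to the
// runtime are fetched from cAdvisor.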
func TestGetContainerInfo(t *testing.T) {
containerID := "ab2cdf"
containerPath := fmt.Sprintf("/docker/%v", containerID)
containerInfo := cadvisorapi.ContainerInfo{
ContainerReference: cadvisorapi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
kubelet := testKubelet.kubelet
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
}
stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", cadvisorReq)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if stats == nil {
t.Fatalf("stats should not be nil")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetRawContainerInfoRoot(t *testing.T) {
containerPath := "/"
containerInfo := &cadvisorapi.ContainerInfo{
ContainerReference: cadvisorapi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
_, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
mockCadvisor.AssertExpectations(t)
}
func TestGetRawContainerInfoSubcontainers(t *testing.T) {
containerPath := "/kubelet"
containerInfo := map[string]*cadvisorapi.ContainerInfo{
containerPath: {
ContainerReference: cadvisorapi.ContainerReference{
Name: containerPath,
},
},
"/kubelet/sub": {
ContainerReference: cadvisorapi.ContainerReference{
Name: "/kubelet/sub",
},
},
}
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(result) != 2 {
t.Errorf("Expected 2 elements, received: %+v", result)
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
containerID := "ab2cdf"
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime := testKubelet.fakeRuntime
cadvisorApiFailure := fmt.Errorf("cAdvisor failure")
containerInfo := cadvisorapi.ContainerInfo{}
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, cadvisorApiFailure)
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "uuid",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{Name: "foo",
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
}
stats, err := kubelet.GetContainerInfo("qux_ns", "uuid", "foo", cadvisorReq)
if stats != nil {
t.Errorf("non-nil stats on error")
}
if err == nil {
t.Errorf("expect error but received nil error")
return
}
if err.Error() != cadvisorApiFailure.Error() {
t.Errorf("wrong error message. expect %v, got %v", cadvisorApiFailure, err)
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*kubecontainer.Pod{}
stats, _ := kubelet.GetContainerInfo("qux", "", "foo", nil)
if stats != nil {
t.Errorf("non-nil stats on non exist container")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime := testKubelet.fakeRuntime
expectedErr := fmt.Errorf("List containers error")
fakeRuntime.Err = expectedErr
stats, err := kubelet.GetContainerInfo("qux", "", "foo", nil)
if err == nil {
t.Errorf("expected error from dockertools, got none")
}
if err.Error() != expectedErr.Error() {
t.Errorf("expected error %v got %v", expectedErr.Error(), err.Error())
}
if stats != nil {
t.Errorf("non-nil stats when dockertools failed")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWithNoContainers(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil)
if err == nil {
t.Errorf("expected error from cadvisor client, got none")
}
if err != ErrContainerNotFound {
t.Errorf("expected error %v, got %v", ErrContainerNotFound.Error(), err.Error())
}
if stats != nil {
t.Errorf("non-nil stats when dockertools returned no containers")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: kubecontainer.ContainerID{"test", "fakeID"},
},
}},
}
stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil)
if err == nil {
t.Errorf("Expected error from cadvisor client, got none")
}
if err != ErrContainerNotFound {
t.Errorf("Expected error %v, got %v", ErrContainerNotFound.Error(), err.Error())
}
if stats != nil {
t.Errorf("non-nil stats when dockertools returned no containers")
}
mockCadvisor.AssertExpectations(t)
}
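// fakeContainerCommandRunner records the arguments of the most recent
// RunInContainer, ExecInContainer, or PortForward call so tests can assert on them.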
type fakeContainerCommandRunner struct {
Cmd []string
ID kubecontainer.ContainerID
PodID types.UID
E error
Stdin io.Reader
Stdout io.WriteCloser
Stderr io.WriteCloser
TTY bool
Port uint16
Stream io.ReadWriteCloser
}
func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string) ([]byte, error) {
f.Cmd = cmd
f.ID = id
return []byte{}, f.E
}
func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
f.Cmd = cmd
f.ID = id
f.Stdin = in
f.Stdout = out
f.Stderr = err
f.TTY = tty
return f.E
}
func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
f.PodID = pod.ID
f.Port = port
f.Stream = stream
return nil
}
func TestRunInContainerNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*kubecontainer.Pod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerName := "containerFoo"
output, err := kubelet.RunInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerName,
[]string{"ls"})
if output != nil {
t.Errorf("unexpected non-nil command: %v", output)
}
if err == nil {
t.Error("unexpected non-error")
}
}
func TestRunInContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
containerID := kubecontainer.ContainerID{"test", "abc1234"}
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "podFoo",
Namespace: "nsFoo",
Containers: []*kubecontainer.Container{
{Name: "containerFoo",
ID: containerID,
},
},
},
}
cmd := []string{"ls"}
_, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
if fakeCommandRunner.ID != containerID {
t.Errorf("unexpected Name: %s", fakeCommandRunner.ID)
}
if !reflect.DeepEqual(fakeCommandRunner.Cmd, cmd) {
t.Errorf("unexpected command: %s", fakeCommandRunner.Cmd)
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
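// countingDNSScrubber counts ScrubDNS invocations while passing its inputs
// through unchanged.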
type countingDNSScrubber struct {
counter *int
}
func (cds countingDNSScrubber) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
(*cds.counter)++
return nameservers, searches
}
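// TestParseResolvConf exercises resolv.conf parsing of nameserver and search
// lines, both with and without a DNS scrubber installed.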
func TestParseResolvConf(t *testing.T) {
testCases := []struct {
data string
nameservers []string
searches []string
}{
{"", []string{}, []string{}},
{" ", []string{}, []string{}},
{"\n", []string{}, []string{}},
{"\t\n\t", []string{}, []string{}},
{"#comment\n", []string{}, []string{}},
{" #comment\n", []string{}, []string{}},
{"#comment\n#comment", []string{}, []string{}},
{"#comment\nnameserver", []string{}, []string{}},
{"#comment\nnameserver\nsearch", []string{}, []string{}},
{"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}},
{"search foo", []string{}, []string{"foo"}},
{"search foo bar", []string{}, []string{"foo", "bar"}},
{"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}},
{"search foo\nsearch bar", []string{}, []string{"bar"}},
{"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}},
{"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}},
{"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}},
}
for i, tc := range testCases {
ns, srch, err := parseResolvConf(strings.NewReader(tc.data), nil)
if err != nil {
t.Errorf("expected success, got %v", err)
continue
}
if !reflect.DeepEqual(ns, tc.nameservers) {
t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
}
if !reflect.DeepEqual(srch, tc.searches) {
t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
}
counter := 0
cds := countingDNSScrubber{&counter}
ns, srch, err = parseResolvConf(strings.NewReader(tc.data), cds)
if err != nil {
t.Errorf("expected success, got %v", err)
continue
}
if !reflect.DeepEqual(ns, tc.nameservers) {
t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
}
if !reflect.DeepEqual(srch, tc.searches) {
t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
}
if counter != 1 {
t.Errorf("[%d] expected dnsScrubber to have been called: got %d", i, counter)
}
}
}
func TestDNSConfigurationParams(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
clusterNS := "203.0.113.1"
kubelet.clusterDomain = "kubernetes.io"
kubelet.clusterDNS = net.ParseIP(clusterNS)
pods := newTestPods(2)
pods[0].Spec.DNSPolicy = api.DNSClusterFirst
pods[1].Spec.DNSPolicy = api.DNSDefault
options := make([]*kubecontainer.RunContainerOptions, 2)
for i, pod := range pods {
var err error
kubelet.volumeManager.SetVolumes(pod.UID, make(kubecontainer.VolumeMap, 0))
options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{})
if err != nil {
t.Fatalf("failed to generate container options: %v", err)
}
}
if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
}
if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
}
if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
}
if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
}
kubelet.resolverConfig = "/etc/resolv.conf"
for i, pod := range pods {
var err error
options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{})
if err != nil {
t.Fatalf("failed to generate container options: %v", err)
}
}
t.Logf("nameservers %+v", options[1].DNS)
if len(options[0].DNS) != 1 {
t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS)
} else if options[0].DNS[0] != clusterNS {
t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
}
if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 {
t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
} else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
}
}
type testServiceLister struct {
services []api.Service
}
func (ls testServiceLister) List() (api.ServiceList, error) {
return api.ServiceList{
Items: ls.services,
}, nil
}
type testNodeLister struct {
nodes []api.Node
}
type testNodeInfo struct {
nodes []api.Node
}
func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
for _, node := range ls.nodes {
if node.Name == id {
return &node, nil
}
}
return nil, fmt.Errorf("Node with name: %s does not exist", id)
}
func (ls testNodeLister) List() (api.NodeList, error) {
return api.NodeList{
Items: ls.nodes,
}, nil
}
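// envs implements sort.Interface so expected and actual environment variables
// can be compared order-independently.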
type envs []kubecontainer.EnvVar
func (e envs) Len() int {
return len(e)
}
func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
func TestMakeEnvironmentVariables(t *testing.T) {
services := []api.Service{
{
ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8081,
}},
ClusterIP: "1.2.3.1",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test1"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8083,
}},
ClusterIP: "1.2.3.3",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8084,
}},
ClusterIP: "1.2.3.4",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8085,
}},
ClusterIP: "1.2.3.5",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8085,
}},
ClusterIP: "None",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8085,
}},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8086,
}},
ClusterIP: "1.2.3.6",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8088,
}},
ClusterIP: "1.2.3.8",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8088,
}},
ClusterIP: "None",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8088,
}},
ClusterIP: "",
},
},
}
testCases := []struct {
name string // the name of the test case
ns string // the namespace to generate environment for
container *api.Container // the container to use
masterServiceNs string // the namespace to read master service info from
nilLister bool // whether the lister should be nil
expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars
}{
{
name: "api server = Y, kubelet = Y",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "api server = Y, kubelet = N",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
{
name: "api server = N; kubelet = Y",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAZ"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAZ"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "master service in pod ns",
ns: "test2",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "ZAP"},
},
},
masterServiceNs: "kubernetes",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "ZAP"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
{Name: "TEST_SERVICE_PORT", Value: "8085"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
{Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
},
},
{
name: "pod in master service ns",
ns: "kubernetes",
container: &api.Container{},
masterServiceNs: "kubernetes",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
{Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8086"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"},
{Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"},
{Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"},
{Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"},
},
},
{
name: "downward api pod",
ns: "downward-api",
container: &api.Container{
Env: []api.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "metadata.namespace",
},
},
},
{
Name: "POD_IP",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "status.podIP",
},
},
},
},
},
masterServiceNs: "nothing",
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "POD_NAME", Value: "dapi-test-pod-name"},
{Name: "POD_NAMESPACE", Value: "downward-api"},
{Name: "POD_IP", Value: "1.2.3.4"},
},
},
{
name: "env expansion",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "metadata.name",
},
},
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-$(EMPTY_VAR)",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-$(POD_NAME)",
},
{
Name: "POD_NAME_TEST3",
Value: "$(POD_NAME_TEST2)-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-$(TEST_LITERAL)",
},
{
Name: "SERVICE_VAR_TEST",
Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
},
},
masterServiceNs: "nothing",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
Value: "dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST3",
Value: "test2-dapi-test-pod-name-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-test-test-test",
},
{
Name: "TEST_SERVICE_HOST",
Value: "1.2.3.3",
},
{
Name: "TEST_SERVICE_PORT",
Value: "8083",
},
{
Name: "TEST_PORT",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP_PROTO",
Value: "tcp",
},
{
Name: "TEST_PORT_8083_TCP_PORT",
Value: "8083",
},
{
Name: "TEST_PORT_8083_TCP_ADDR",
Value: "1.2.3.3",
},
{
Name: "SERVICE_VAR_TEST",
Value: "1.2.3.3:8083",
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-",
},
},
},
}
for i, tc := range testCases {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
kl.masterServiceNamespace = tc.masterServiceNs
if tc.nilLister {
kl.serviceLister = nil
} else {
kl.serviceLister = testServiceLister{services}
}
testPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Namespace: tc.ns,
Name: "dapi-test-pod-name",
},
}
testPod.Status.PodIP = "1.2.3.4"
result, err := kl.makeEnvironmentVariables(testPod, tc.container)
if err != nil {
t.Errorf("[%v] Unexpected error: %v", tc.name, err)
}
sort.Sort(envs(result))
sort.Sort(envs(tc.expectedEnvs))
if !reflect.DeepEqual(result, tc.expectedEnvs) {
t.Errorf("%d: [%v] Unexpected env entries; expected {%v}, got {%v}", i, tc.name, tc.expectedEnvs, result)
}
}
}
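// The helpers below build api.ContainerStatus values in the various container
// states exercised by the pod phase tests.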
func waitingState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
}
}
func waitingStateWithLastTermination(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
LastTerminationState: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func runningState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
}
func stoppedState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
}
}
func succeededState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func failedState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: -1,
},
},
}
}
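// TestPodPhaseWithRestartAlways checks the phase GetPhase reports for pods
// whose restart policy is Always.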
func TestPodPhaseWithRestartAlways(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyAlways,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
stoppedState("containerA"),
stoppedState("containerB"),
},
},
},
api.PodRunning,
"all stopped with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
stoppedState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
api.PodPending,
"mixed state #3 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingStateWithLastTermination("containerB"),
},
},
},
api.PodRunning,
"backoff crashloop container with restart always",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestPodPhaseWithRestartNever(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyNever,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
api.PodSucceeded,
"all succeeded with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
api.PodFailed,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
api.PodPending,
"mixed state #3 with restart never",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestPodPhaseWithRestartOnFailure(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyOnFailure,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
api.PodSucceeded,
"all succeeded with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
api.PodRunning,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
api.PodPending,
"mixed state #3 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingStateWithLastTermination("containerB"),
},
},
},
api.PodRunning,
"backoff crashloop container with restart onfailure",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestExecInContainerNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
fakeRuntime.PodList = []*kubecontainer.Pod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerID,
[]string{"ls"},
nil,
nil,
nil,
false,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
func TestExecInContainerNoSuchContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: kubecontainer.ContainerID{"test", "barID"}},
},
},
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
containerID,
[]string{"ls"},
nil,
nil,
nil,
false,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
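// fakeReadWriteCloser is a no-op stream used in place of real stdout/stderr
// and port-forward streams.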
type fakeReadWriteCloser struct{}
func (f *fakeReadWriteCloser) Write(data []byte) (int, error) {
return 0, nil
}
func (f *fakeReadWriteCloser) Read(data []byte) (int, error) {
return 0, nil
}
func (f *fakeReadWriteCloser) Close() error {
return nil
}
func TestExecInContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
command := []string{"ls"}
stdin := &bytes.Buffer{}
stdout := &fakeReadWriteCloser{}
stderr := &fakeReadWriteCloser{}
tty := true
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: containerID,
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
containerID,
[]string{"ls"},
stdin,
stdout,
stderr,
tty,
)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := containerID, fakeCommandRunner.ID.ID; e != a {
t.Fatalf("container name: expected %q, got %q", e, a)
}
if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) {
t.Fatalf("command: expected '%v', got '%v'", e, a)
}
if e, a := stdin, fakeCommandRunner.Stdin; e != a {
t.Fatalf("stdin: expected %#v, got %#v", e, a)
}
if e, a := stdout, fakeCommandRunner.Stdout; e != a {
t.Fatalf("stdout: expected %#v, got %#v", e, a)
}
if e, a := stderr, fakeCommandRunner.Stderr; e != a {
t.Fatalf("stderr: expected %#v, got %#v", e, a)
}
if e, a := tty, fakeCommandRunner.TTY; e != a {
t.Fatalf("tty: expected %t, got %t", e, a)
}
}
func TestPortForwardNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*kubecontainer.Pod{}
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
var port uint16 = 5000
err := kubelet.PortForward(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
port,
nil,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.PortForward")
}
}
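// TestPortForward verifies that a port-forward request for a known pod reaches
// the runner with the expected pod ID, port, and stream.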
func TestPortForward(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
podName := "podFoo"
podNamespace := "nsFoo"
podID := types.UID("12345678")
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: podID,
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: kubecontainer.ContainerID{"test", "containerFoo"},
},
},
},
}
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
var port uint16 = 5000
stream := &fakeReadWriteCloser{}
err := kubelet.PortForward(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
port,
stream,
)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := podID, fakeCommandRunner.PodID; e != a {
t.Fatalf("container id: expected %q, got %q", e, a)
}
if e, a := port, fakeCommandRunner.Port; e != a {
t.Fatalf("port: expected %v, got %v", e, a)
}
if e, a := stream, fakeCommandRunner.Stream; e != a {
t.Fatalf("stream: expected %v, got %v", e, a)
}
}
// Tests that host port conflicts are detected correctly.
func TestGetHostPortConflicts(t *testing.T) {
pods := []*api.Pod{
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}},
}
// Pods should not cause any conflict.
if hasHostPortConflicts(pods) {
t.Errorf("expected no conflicts, Got conflicts")
}
expected := &api.Pod{
Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
}
// The new pod should cause conflict and be reported.
pods = append(pods, expected)
if !hasHostPortConflicts(pods) {
t.Errorf("expected no conflict, Got no conflicts")
}
}
// Tests that we handle port conflicts correctly by setting the failed status in status map.
func TestHandlePortConflicts(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
spec := api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "newpod",
Namespace: "foo",
},
Spec: spec,
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "oldpod",
Namespace: "foo",
},
Spec: spec,
},
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
conflictedPod := pods[0]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(conflictedPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", conflictedPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
// Tests that pods whose node selector does not match the node's labels are handled correctly by setting the failed status in the status map.
func TestHandleNodeSelector(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
}}
kl.nodeInfo = testNodeInfo{nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
}}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "podA",
Namespace: "foo",
},
Spec: api.PodSpec{NodeSelector: map[string]string{"key": "A"}},
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "podB",
Namespace: "foo",
},
Spec: api.PodSpec{NodeSelector: map[string]string{"key": "B"}},
},
}
// The first pod should be rejected.
notfittingPod := pods[0]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
// Tests that we handle exceeded resources correctly by setting the failed status in status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{MemoryCapacity: 100}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
spec := api.PodSpec{Containers: []api.Container{{Resources: api.ResourceRequirements{
Requests: api.ResourceList{
"memory": resource.MustParse("90"),
},
}}}}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "newpod",
Namespace: "foo",
},
Spec: spec,
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "oldpod",
Namespace: "foo",
},
Spec: spec,
},
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
notfittingPod := pods[0]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
{ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
}
podToTest := pods[1]
// Run once to populate the status map.
kl.HandlePodAdditions(pods)
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
t.Fatalf("expected to have status cached for pod2")
}
// Sync with empty pods so that the entry in status map will be removed.
kl.podManager.SetPods([]*api.Pod{})
kl.HandlePodCleanups()
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
t.Fatalf("expected to not have status cached for pod2")
}
}
func TestValidatePodStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
testCases := []struct {
podPhase api.PodPhase
success bool
}{
{api.PodRunning, true},
{api.PodSucceeded, true},
{api.PodFailed, true},
{api.PodPending, false},
{api.PodUnknown, false},
}
for i, tc := range testCases {
err := kubelet.validatePodPhase(&api.PodStatus{Phase: tc.podPhase})
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
}
func TestValidateContainerStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
containerName := "x"
testCases := []struct {
statuses []api.ContainerStatus
success bool
}{
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
LastTerminationState: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
},
},
success: false,
},
}
for i, tc := range testCases {
_, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: tc.statuses,
}, containerName, false)
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, "blah", false); err == nil {
t.Errorf("expected error with invalid container name")
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, true); err != nil {
t.Errorf("unexpected error with for previous terminated container - %v", err)
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, true); err == nil {
t.Errorf("expected error with for previous terminated container")
}
}
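// TestUpdateNewNodeStatus verifies the full node status posted on the first
// status update: conditions, node info, capacity, and addresses.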
func TestUpdateNewNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Create a new DiskSpaceManager with a new policy. This new manager along with the mock
// FsInfo values added to Cadvisor should make the kubelet report that it has sufficient
// disk space.
dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 200 * mb}
rootFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 200 * mb}
mockCadvisor.On("DockerImagesFsInfo").Return(dockerimagesFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
dsp := DiskSpacePolicy{DockerFreeDiskMB: 100, RootFreeDiskMB: 100}
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
if err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
diskSpaceManager.Unfreeze()
kubelet.diskSpaceManager = diskSpaceManager
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(testclient.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
}
}
// FIXME: Enable me. The lowercase name keeps the testing framework from running this test.
func testDockerRuntimeVersion(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.RuntimeType = "docker"
fakeRuntime.VersionInfo = "1.18"
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Create a new DiskSpaceManager with a new policy. This new manager along with the mock
// FsInfo values added to Cadvisor should make the kubelet report that it has sufficient
// disk space.
dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 200 * mb}
rootFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 200 * mb}
mockCadvisor.On("DockerImagesFsInfo").Return(dockerimagesFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
dsp := DiskSpacePolicy{DockerFreeDiskMB: 100, RootFreeDiskMB: 100}
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
if err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
diskSpaceManager.Unfreeze()
kubelet.diskSpaceManager = diskSpaceManager
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(testclient.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp")
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp")
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
}
// Downgrade docker version, node should be NotReady
fakeRuntime.RuntimeType = "docker"
fakeRuntime.VersionInfo = "1.17"
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions = kubeClient.Actions()
if len(actions) != 4 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[3].Matches("update", "nodes") || actions[3].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok = actions[3].(testclient.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
if updatedNode.Status.Conditions[0].Reason != "KubeletNotReady" ||
!strings.Contains(updatedNode.Status.Conditions[0].Message, "container runtime version is older than") {
t.Errorf("unexpected NodeStatus due to container runtime version")
}
}
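// TestUpdateExistingNodeStatus verifies that a status update refreshes
// LastHeartbeatTime while preserving LastTransitionTime for conditions whose
// status is unchanged.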
func TestUpdateExistingNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(2048, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
},
},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Create a new DiskSpaceManager with a new policy. This new manager along with the mock FsInfo
// values added to Cadvisor should make the kubelet report that it is out of disk space.
dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 70 * mb}
rootFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 50 * mb}
mockCadvisor.On("DockerImagesFsInfo").Return(dockerimagesFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
dsp := DiskSpacePolicy{DockerFreeDiskMB: 100, RootFreeDiskMB: 100}
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
if err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
diskSpaceManager.Unfreeze()
kubelet.diskSpaceManager = diskSpaceManager
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.Time{}, // placeholder
LastTransitionTime: unversioned.Time{}, // placeholder
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: unversioned.Time{}, // placeholder
LastTransitionTime: unversioned.Time{}, // placeholder
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Errorf("unexpected actions: %v", actions)
}
updateAction, ok := actions[1].(testclient.UpdateAction)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
updatedNode, ok := updateAction.GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
for i, cond := range updatedNode.Status.Conditions {
// Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same.
if old := unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, unversioned.Now(), old)
}
if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
}
}
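// TestUpdateNodeStatusWithoutContainerRuntime verifies that the node is
// reported NotReady when the container runtime is down.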
func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
fakeRuntime := testKubelet.fakeRuntime
// An empty version string makes GetContainerRuntimeVersion() return an
// error, which simulates that the container runtime is down.
fakeRuntime.VersionInfo = ""
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Create a new DiskSpaceManager with a new policy. This new manager along with the
// mock FsInfo values assigned to Cadvisor should make the kubelet report that it has
// sufficient disk space.
dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 200 * mb}
rootFsInfo := cadvisorapiv2.FsInfo{Capacity: 500 * mb, Available: 200 * mb}
mockCadvisor.On("DockerImagesFsInfo").Return(dockerimagesFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
dsp := DiskSpacePolicy{DockerFreeDiskMB: 100, RootFreeDiskMB: 100}
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
if err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
diskSpaceManager.Unfreeze()
kubelet.diskSpaceManager = diskSpaceManager
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionFalse,
Reason: "KubeletNotReady",
Message: "container runtime is down",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.runtimeState = newRuntimeState(time.Duration(0), false, "" /* Pod CIDR */, func() error { return nil })
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(testclient.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp")
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp")
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
}
}
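// TestUpdateNodeStatusError verifies that updateNodeStatus retries
// nodeStatusUpdateRetry times and returns an error when no matching node
// object exists.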
func TestUpdateNodeStatusError(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{}}).ReactionChain
if err := kubelet.updateNodeStatus(); err == nil {
t.Errorf("unexpected non error: %v", err)
}
if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
}
}
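// TestCreateMirrorPod verifies that syncing a static pod creates exactly one
// mirror pod, for both the create and the update sync types.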
func TestCreateMirrorPod(t *testing.T) {
for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "foo",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
}
pods := []*api.Pod{pod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, nil, container.Pod{}, updateType)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
podFullName := kubecontainer.GetPodFullName(pod)
if !manager.HasPod(podFullName) {
t.Errorf("expected mirror pod %q to be created", podFullName)
}
if manager.NumOfPods() != 1 || !manager.HasPod(podFullName) {
t.Errorf("expected one mirror pod %q, got %v", podFullName, manager.GetPods())
}
}
}
func TestDeleteOutdatedMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "foo"},
},
},
}
// Mirror pod has an outdated spec.
mirrorPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "11111111",
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "bar"},
},
},
}
pods := []*api.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, mirrorPod, container.Pod{}, kubetypes.SyncPodUpdate)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
}
}
func TestDeleteOrphanedMirrorPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
orphanPods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
}
kl.podManager.SetPods(orphanPods)
// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups()
if manager.NumOfPods() != 0 {
t.Errorf("expected zero mirror pods, got %v", manager.GetPods())
}
for _, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
}
}
}
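// TestGetContainerInfoForMirrorPods verifies that container stats can be
// retrieved using the mirror pod's UID in place of the static pod's UID.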
func TestGetContainerInfoForMirrorPods(t *testing.T) {
// pods contain one static and one mirror pod with the same name but
// different UIDs.
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "1234",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "5678",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
}
containerID := "ab2cdf"
containerPath := fmt.Sprintf("/docker/%v", containerID)
containerInfo := cadvisorapi.ContainerInfo{
ContainerReference: cadvisorapi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
kubelet := testKubelet.kubelet
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: kubecontainer.ContainerID{"test", containerID},
},
},
},
}
kubelet.podManager.SetPods(pods)
// Use the mirror pod UID to retrieve the stats.
stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if stats == nil {
t.Fatalf("stats should not be nil")
}
mockCadvisor.AssertExpectations(t)
}
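// TestHostNetworkAllowed verifies that a host-network pod from a privileged
// source syncs successfully.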
func TestHostNetworkAllowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
},
})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
}
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
}
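// TestHostNetworkDisallowed verifies that a host-network pod is rejected when
// its source is not in the privileged host-network sources.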
func TestHostNetworkDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{},
},
})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
}
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
}
func TestPrivilegeContainerAllowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: true,
})
privileged := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
},
}
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
}
func TestPrivilegeContainerDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t)
2015-05-09 21:17:36 +00:00
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: false,
})
privileged := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
},
}
err := kubelet.syncPod(pod, nil, container.Pod{}, kubetypes.SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
}
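// TestFilterOutTerminatedPods verifies that failed and succeeded pods are
// filtered out, while running, pending, and phase-less pods are kept.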
func TestFilterOutTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
pods[0].Status.Phase = api.PodFailed
pods[1].Status.Phase = api.PodSucceeded
pods[2].Status.Phase = api.PodRunning
pods[3].Status.Phase = api.PodPending
expected := []*api.Pod{pods[2], pods[3], pods[4]}
kubelet.podManager.SetPods(pods)
actual := kubelet.filterOutTerminatedPods(pods)
if !reflect.DeepEqual(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
}
}
func TestRegisterExistingNodeWithApiserver(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("create", "nodes", func(action testclient.Action) (bool, runtime.Object, error) {
// Return an error on create.
return true, &api.Node{}, &apierrors.StatusError{
ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists},
}
})
kubeClient.AddReactor("get", "nodes", func(action testclient.Action) (bool, runtime.Object, error) {
// Return an existing (matching) node on get.
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
done := make(chan struct{})
go func() {
kubelet.registerWithApiserver()
done <- struct{}{}
}()
select {
case <-time.After(util.ForeverTestTimeout):
t.Errorf("timed out waiting for registration")
case <-done:
return
}
}
func TestGetNodeLabels(t *testing.T) {
kubelet := newTestKubelet(t).kubelet
testCases := []struct {
Expecting map[string]string
LabelOptions []string
FileContent string
Ok bool
}{
{
Ok: true,
Expecting: map[string]string{"key1": "pair1", "key2": "pair2", "key3": "pair3", "key4": "pair4", "key5": "pair5"},
LabelOptions: []string{"key5=pair5"},
FileContent: `---
key1: pair1
key2: pair2
key3: pair3
key4: pair4
`,
}, {
Ok: true,
Expecting: map[string]string{"key1": "pair1", "key2": "override"},
LabelOptions: []string{"key2=override"},
FileContent: `---
key1: pair1
key2: pair2
`,
},
}
for i, test := range testCases {
fd := createTestNodeLabelFile(t, test.FileContent)
defer func(f *os.File) {
os.Remove(f.Name())
}(fd)
kubelet.nodeLabels = test.LabelOptions
kubelet.nodeLabelsFile = fd.Name()
list, err := kubelet.getNodeLabels()
if test.Ok && err != nil {
t.Errorf("test case %d should not have failed, error: %s", i, err)
}
if !reflect.DeepEqual(test.Expecting, list) {
t.Errorf("test case %d are not the same, %v ~ %v", i, list, test.Expecting)
}
}
}
func TestRetrieveNodeLabels(t *testing.T) {
kubelet := newTestKubelet(t).kubelet
testCases := []struct {
Expecting map[string]string
LabelOptions []string
Ok bool
}{
{
Expecting: map[string]string{"key1": "pair1", "key2": "pair2", "key3": "pair3", "key4": "pair4"},
LabelOptions: []string{"key1=pair1", "key2=pair2", "key3=pair3", "key4=pair4"},
Ok: true,
},
{
Expecting: map[string]string{"key1": "pair1"},
LabelOptions: []string{"key1=pair1", "key2paiwdsr2"},
},
}
for i, test := range testCases {
list, err := kubelet.retrieveNodeLabels(test.LabelOptions)
if test.Ok && err != nil {
t.Errorf("test case %d should not have failed, error: %s", i, err)
}
if !reflect.DeepEqual(test.Expecting, list) {
t.Errorf("test case %d are not the same, %v ~ %v", i, list, test.Expecting)
}
}
}
func TestRetrieveNodeLabelsFile(t *testing.T) {
kubelet := newTestKubelet(t).kubelet
testCases := []struct {
Expecting map[string]string
Ok bool
FileContent string
}{
{
Expecting: map[string]string{"key1": "pair1", "key2": "pair2", "key3": "pair3", "key4": "pair4"},
Ok: true,
FileContent: `---
key1: pair1
key2: pair2
key3: pair3
key4: pair4`,
}, {
FileContent: `---
key1: pair1
hash_map:
key2: pair2
`,
}, {
Expecting: map[string]string{"key1": "pair1", "key2": "pair2"},
Ok: true,
FileContent: `
key1: pair1
key2: pair2
`,
}, {
FileContent: `---
key1: pair1
bad_key_pair
`,
}, {
Expecting: nil,
FileContent: `{
"key1": "pair1",
"key2": "pair2",
"key3": "pair3",
"key4": {
"some_key": "some_value"
}
}`,
}, {
FileContent: "",
}, {
Expecting: map[string]string{"key1": "pair1", "key2": "pair2", "key3": "pair3", "key4": "pair4"},
Ok: true,
FileContent: `---
key1: pair1
key2: pair2
key3: pair3
key4: pair4
`,
},
}
for i, test := range testCases {
fd := createTestNodeLabelFile(t, test.FileContent)
defer func(f *os.File) {
os.Remove(f.Name())
}(fd)
labels, err := kubelet.retrieveNodeLabelsFile(fd.Name())
if test.Ok && err != nil {
t.Errorf("test case %d should not have returned an error, %s", i, err)
continue
}
if test.Expecting != nil && !reflect.DeepEqual(test.Expecting, labels) {
t.Errorf("test case %d not as expected, got: %#v, expecting: %#v", i, labels, test.Expecting)
}
}
}
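// createTestNodeLabelFile writes content to a temporary file and returns the
// file; callers are responsible for removing it.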
func createTestNodeLabelFile(t *testing.T, content string) *os.File {
f, err := ioutil.TempFile("", "node_label_file")
if err != nil {
t.Fatalf("unexpected error creating node_label_file: %v", err)
}
f.Close()
if err := ioutil.WriteFile(f.Name(), []byte(content), 0700); err != nil {
t.Fatalf("unexpected error writing node label file: %v", err)
}
return f
}
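// TestMakePortMappings verifies port-mapping name generation (an unnamed TCP
// port 80 in container "fooContainer" becomes "fooContainer-TCP:80") and that
// duplicate names and protocol/port pairs are dropped.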
func TestMakePortMappings(t *testing.T) {
tests := []struct {
container *api.Container
expectedPortMappings []kubecontainer.PortMapping
}{
{
&api.Container{
Name: "fooContainer",
Ports: []api.ContainerPort{
{
Protocol: api.ProtocolTCP,
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
Protocol: api.ProtocolTCP,
ContainerPort: 443,
HostPort: 4343,
HostIP: "192.168.0.1",
},
{
Name: "foo",
Protocol: api.ProtocolUDP,
ContainerPort: 555,
HostPort: 5555,
},
{
Name: "foo", // Duplicated, should be ignored.
Protocol: api.ProtocolUDP,
ContainerPort: 888,
HostPort: 8888,
},
{
Protocol: api.ProtocolTCP, // Duplicated, should be ignored.
ContainerPort: 80,
HostPort: 8888,
},
},
},
[]kubecontainer.PortMapping{
{
Name: "fooContainer-TCP:80",
Protocol: api.ProtocolTCP,
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
Name: "fooContainer-TCP:443",
Protocol: api.ProtocolTCP,
ContainerPort: 443,
HostPort: 4343,
HostIP: "192.168.0.1",
},
{
Name: "fooContainer-foo",
Protocol: api.ProtocolUDP,
ContainerPort: 555,
HostPort: 5555,
HostIP: "",
},
},
},
}
for i, tt := range tests {
actual := makePortMappings(tt.container)
if !reflect.DeepEqual(tt.expectedPortMappings, actual) {
t.Errorf("%d: Expected: %#v, saw: %#v", i, tt.expectedPortMappings, actual)
}
}
}
func TestIsPodPastActiveDeadline(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
exceededActiveDeadlineSeconds := int64(30)
notYetActiveDeadlineSeconds := int64(120)
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
tests := []struct {
pod *api.Pod
expected bool
}{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}, {pods[4], false}}
kubelet.podManager.SetPods(pods)
for i, tt := range tests {
actual := kubelet.pastActiveDeadline(tt.pod)
if actual != tt.expected {
t.Errorf("[%d] expected %#v, got %#v", i, tt.expected, actual)
}
}
}
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(30)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: api.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
},
}
// Let the pod worker set the status to failed after this sync.
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
if !found {
t.Errorf("expected to found status for pod %q", pods[0].UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q, ot %q.", api.PodFailed, status.Phase)
}
}
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(300)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: api.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
if !found {
t.Errorf("expected to found status for pod %q", pods[0].UID)
}
if status.Phase == api.PodFailed {
t.Fatalf("expected pod status to not be %q", status.Phase)
}
}
func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
},
},
}
kl.podManager.SetPods(pods)
// Sync to create pod directories.
kl.HandlePodSyncs(kl.podManager.GetPods())
for i := range pods {
if !dirExists(kl.getPodDir(pods[i].UID)) {
t.Errorf("expected directory to exist for pod %d", i)
}
}
// Pod 1 has been deleted and no longer exists.
kl.podManager.SetPods([]*api.Pod{pods[0]})
kl.HandlePodCleanups()
if !dirExists(kl.getPodDir(pods[0].UID)) {
t.Errorf("expected directory to exist for pod 0")
}
if dirExists(kl.getPodDir(pods[1].UID)) {
t.Errorf("expected directory to be deleted for pod 1")
}
}
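// syncAndVerifyPodDir syncs the given pods, runs pod cleanup, and asserts
// that the pod directories for podsToCheck do or do not exist, per
// shouldExist.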
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) {
kl := testKubelet.kubelet
kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
kl.HandlePodCleanups()
for i, pod := range podsToCheck {
exist := dirExists(kl.getPodDir(pod.UID))
if shouldExist && !exist {
t.Errorf("expected directory to exist for pod %d", i)
} else if !shouldExist && exist {
t.Errorf("expected directory to be removed for pod %d", i)
}
}
}
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345680",
Name: "pod3",
Namespace: "ns",
},
},
}
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
// deleted.
kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed})
kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded})
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
runningPod := &kubecontainer.Pod{
ID: "12345678",
Name: "pod1",
Namespace: "ns",
}
apiPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: runningPod.ID,
Name: runningPod.Name,
Namespace: runningPod.Namespace,
},
}
// Sync once to create pod directory; confirm that the pod directory has
// already been created.
pods := []*api.Pod{apiPod}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
// Pretend the pod is deleted from apiserver, but is still active on the node.
// The pod directory should not be removed.
pods = []*api.Pod{}
testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{runningPod}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
// The pod is deleted and also not active on the node. The pod directory
// should be removed.
pods = []*api.Pod{}
testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
}
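// TestCleanupBandwidthLimits verifies that traffic-shaping CIDRs belonging to
// pods that are no longer running are reset, and that cached pod statuses are
// used without calling into the runtime.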
func TestCleanupBandwidthLimits(t *testing.T) {
tests := []struct {
status *api.PodStatus
pods []*api.Pod
inputCIDRs []string
expectResetCIDRs []string
cacheStatus bool
expectedCalls []string
name string
}{
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{"GetAPIPodStatus"},
name: "pod running",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{},
cacheStatus: true,
name: "pod running with cache",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{"GetAPIPodStatus"},
name: "pod not running",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{},
cacheStatus: true,
name: "pod not running with cache",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
name: "no bandwidth limits",
},
}
for _, test := range tests {
shaper := &bandwidth.FakeShaper{
CIDRs: test.inputCIDRs,
}
testKube := newTestKubelet(t)
testKube.kubelet.shaper = shaper
testKube.fakeRuntime.APIPodStatus = *test.status
if test.cacheStatus {
for _, pod := range test.pods {
testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
}
}
err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
if err != nil {
t.Errorf("unexpected error: %v (%s)", test.name)
}
if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
}
if test.cacheStatus {
if len(testKube.fakeRuntime.CalledFunctions) != 0 {
t.Errorf("unexpected function calls: %v", testKube.fakeRuntime.CalledFunctions)
}
} else if !reflect.DeepEqual(testKube.fakeRuntime.CalledFunctions, test.expectedCalls) {
t.Errorf("[%s], expected %v, saw %v", test.name, test.expectedCalls, testKube.fakeRuntime.CalledFunctions)
}
}
}
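// TestExtractBandwidthResources verifies parsing of the ingress and egress
// bandwidth annotations into resource quantities.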
func TestExtractBandwidthResources(t *testing.T) {
four, _ := resource.ParseQuantity("4M")
ten, _ := resource.ParseQuantity("10M")
twenty, _ := resource.ParseQuantity("20M")
tests := []struct {
pod *api.Pod
expectedIngress *resource.Quantity
expectedEgress *resource.Quantity
expectError bool
}{
{
pod: &api.Pod{},
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "10M",
},
},
},
expectedIngress: ten,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"net.alpha.kubernetes.io/egress-bandwidth": "10M",
},
},
},
expectedEgress: ten,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "4M",
"net.alpha.kubernetes.io/egress-bandwidth": "20M",
},
},
},
expectedIngress: four,
expectedEgress: twenty,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"net.alpha.kubernetes.io/ingress-bandwidth": "foo",
},
},
},
expectError: true,
},
}
for _, test := range tests {
ingress, egress, err := extractBandwidthResources(test.pod)
if test.expectError {
if err == nil {
t.Errorf("unexpected non-error")
}
continue
}
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !reflect.DeepEqual(ingress, test.expectedIngress) {
t.Errorf("expected: %v, saw: %v", ingress, test.expectedIngress)
}
if !reflect.DeepEqual(egress, test.expectedEgress) {
t.Errorf("expected: %v, saw: %v", egress, test.expectedEgress)
}
}
}
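// TestGetPodsToSync verifies that getPodsToSync returns the pods that are
// past their active deadline plus the pods whose work-queue entries are due.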
func TestGetPodsToSync(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
podUIDs := []types.UID{}
for _, pod := range pods {
podUIDs = append(podUIDs, pod.UID)
}
exceededActiveDeadlineSeconds := int64(30)
notYetActiveDeadlineSeconds := int64(120)
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
pods[2].Status.StartTime = &startTime
pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
kubelet.podManager.SetPods(pods)
kubelet.workQueue.Enqueue(pods[2].UID, 0)
kubelet.workQueue.Enqueue(pods[3].UID, 0)
kubelet.workQueue.Enqueue(pods[4].UID, time.Hour)
expectedPodsUID := []types.UID{pods[0].UID, pods[2].UID, pods[3].UID}
podsToSync := kubelet.getPodsToSync()
if len(podsToSync) == len(expectedPodsUID) {
var rightNum int
for _, podUID := range expectedPodsUID {
for _, podToSync := range podsToSync {
if podToSync.UID == podUID {
rightNum++
break
}
}
}
if rightNum != len(expectedPodsUID) {
// Collect the UIDs that were returned, just for the error message.
podsToSyncUID := []types.UID{}
for _, podToSync := range podsToSync {
podsToSyncUID = append(podsToSyncUID, podToSync.UID)
}
t.Errorf("expected pods %v to sync, got %v", expectedPodsUID, podsToSyncUID)
}
} else {
t.Errorf("expected %d pods to sync, got %d", 3, len(podsToSync))
}
}