k3s/pkg/kubelet/kubelet_test.go

/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"reflect"
goruntime "runtime"
"sort"
"strconv"
"strings"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volume"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/bandwidth"
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/rand"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume"
_ "k8s.io/kubernetes/pkg/volume/host_path"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
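// init enables ReallyCrash so that panics recovered by util/runtime surface as real crashes in tests.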
func init() {
utilruntime.ReallyCrash = true
}
const (
testKubeletHostname = "127.0.0.1"
testReservationCPU = "200m"
testReservationMemory = "100M"
maxImageTagsForTest = 3
// TODO(harry) any global place for these two?
// Reasonable size range of all container images. The 90th percentile of images on Docker Hub falls within this range.
minImgSize int64 = 23 * 1024 * 1024
maxImgSize int64 = 1000 * 1024 * 1024
)
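// fakeHTTP records the last URL requested through Get and returns a configurable error.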
type fakeHTTP struct {
url string
err error
}
func (f *fakeHTTP) Get(url string) (*http.Response, error) {
f.url = url
return nil, f.err
}
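// TestKubelet bundles a Kubelet under test together with the fake dependencies injected into it.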
type TestKubelet struct {
kubelet *Kubelet
fakeRuntime *containertest.FakeRuntime
fakeCadvisor *cadvisortest.Mock
fakeKubeClient *fake.Clientset
fakeMirrorClient *podtest.FakeMirrorClient
fakeClock *util.FakeClock
mounter mount.Interface
volumePlugin *volumetest.FakeVolumePlugin
}
// newTestKubelet returns a test kubelet preloaded with two fake container images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
imageList := []kubecontainer.Image{
{
ID: "abc",
RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
Size: 123,
},
{
ID: "efg",
RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
Size: 456,
},
}
return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled)
}
// generateTestingImageList generates a random image list and the corresponding expected api.ContainerImage list.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
// imageList is a randomly generated image list
var imageList []kubecontainer.Image
for ; count > 0; count-- {
imageItem := kubecontainer.Image{
ID: string(util.NewUUID()),
RepoTags: generateImageTags(),
Size: rand.Int63nRange(minImgSize, maxImgSize+1),
}
imageList = append(imageList, imageItem)
}
// expectedImageList is derived from imageList according to size and maxImagesInNodeStatus
// 1. sort the imageList by size
sort.Sort(byImageSize(imageList))
// 2. convert sorted imageList to api.ContainerImage list
var expectedImageList []api.ContainerImage
for _, kubeImage := range imageList {
apiImage := api.ContainerImage{
Names: kubeImage.RepoTags,
SizeBytes: kubeImage.Size,
}
expectedImageList = append(expectedImageList, apiImage)
}
// 3. return only the top maxImagesInNodeStatus images from expectedImageList
return imageList, expectedImageList[0:maxImagesInNodeStatus]
}
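// generateImageTags returns between 1 and maxImageTagsForTest fake image tags.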
func generateImageTags() []string {
var tagList []string
count := rand.IntnRange(1, maxImageTagsForTest+1)
for ; count > 0; count-- {
tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count))
}
return tagList
}
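// newTestKubeletWithImageList builds a Kubelet whose dependencies (runtime, cadvisor, API client,
// volume plugins, eviction and volume managers, etc.) are all fakes, preloaded with the given image list.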
func newTestKubeletWithImageList(
t *testing.T,
imageList []kubecontainer.Image,
controllerAttachDetachEnabled bool) *TestKubelet {
fakeRuntime := &containertest.FakeRuntime{}
fakeRuntime.RuntimeType = "test"
fakeRuntime.VersionInfo = "1.5.0"
fakeRuntime.ImageList = imageList
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &fake.Clientset{}
kubelet := &Kubelet{}
kubelet.kubeClient = fakeKubeClient
kubelet.os = &containertest.FakeOS{}
kubelet.hostname = testKubeletHostname
kubelet.nodeName = testKubeletHostname
kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
kubelet.runtimeState.setNetworkState(nil)
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR)
if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
} else {
kubelet.rootDirectory = tempDir
}
if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
}
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
kubelet.masterServiceNamespace = api.NamespaceDefault
kubelet.serviceLister = testServiceLister{}
kubelet.nodeLister = testNodeLister{}
kubelet.nodeInfo = testNodeInfo{}
kubelet.recorder = fakeRecorder
if err := kubelet.setupDataDirs(); err != nil {
t.Fatalf("can't initialize kubelet data dirs: %v", err)
}
kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{}
mockCadvisor := &cadvisortest.Mock{}
kubelet.cadvisor = mockCadvisor
fakeMirrorClient := podtest.NewFakeMirrorClient()
kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient)
kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
kubelet.containerRefManager = kubecontainer.NewRefManager()
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
if err != nil {
t.Fatalf("can't initialize disk space manager: %v", err)
}
kubelet.diskSpaceManager = diskSpaceManager
kubelet.containerRuntime = fakeRuntime
kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
kubelet.reasonCache = NewReasonCache()
kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: kubelet.syncPod,
cache: kubelet.podCache,
t: t,
}
kubelet.probeManager = probetest.FakeManager{}
kubelet.livenessManager = proberesults.NewManager()
kubelet.containerManager = cm.NewStubContainerManager()
fakeNodeRef := &api.ObjectReference{
Kind: "Node",
Name: testKubeletHostname,
UID: types.UID(testKubeletHostname),
Namespace: "",
}
fakeImageGCPolicy := ImageGCPolicy{
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
kubelet.imageManager, err = newImageManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
if err != nil {
t.Fatalf("failed to initialize image manager: %v", err)
}
fakeClock := util.NewFakeClock(time.Now())
kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock
kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
kubelet.resyncInterval = 10 * time.Second
kubelet.reservation = kubetypes.Reservation{
Kubernetes: api.ResourceList{
api.ResourceCPU: resource.MustParse(testReservationCPU),
api.ResourceMemory: resource.MustParse(testReservationMemory),
},
}
kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
// Relist period does not affect the tests.
kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, util.RealClock{})
kubelet.clock = fakeClock
kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
volumeStatsAggPeriod := time.Second * 10
kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
nodeRef := &api.ObjectReference{
Kind: "Node",
Name: kubelet.nodeName,
UID: types.UID(kubelet.nodeName),
Namespace: "",
}
// setup eviction manager
evictionManager, evictionAdmitHandler, err := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers), fakeRecorder, nodeRef, kubelet.clock)
if err != nil {
t.Fatalf("failed to initialize eviction manager: %v", err)
}
kubelet.evictionManager = evictionManager
kubelet.AddPodAdmitHandler(evictionAdmitHandler)
plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
kubelet.volumePluginMgr, err =
NewInitializedVolumePluginMgr(kubelet, []volume.VolumePlugin{plug})
if err != nil {
t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
}
kubelet.volumeManager, err = kubeletvolume.NewVolumeManager(
controllerAttachDetachEnabled,
kubelet.hostname,
kubelet.podManager,
fakeKubeClient,
kubelet.volumePluginMgr)
if err != nil {
t.Fatalf("failed to initialize volume manager: %v", err)
}
return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}
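// newTestPods returns count host-network pods with unique UIDs and names.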
func newTestPods(count int) []*api.Pod {
pods := make([]*api.Pod, count)
for i := 0; i < count; i++ {
pods[i] = &api.Pod{
Spec: api.PodSpec{
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
ObjectMeta: api.ObjectMeta{
UID: types.UID(strconv.Itoa(10000 + i)),
Name: fmt.Sprintf("pod%d", i),
},
}
}
return pods
}
var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
func TestSyncLoopTimeUpdate(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
loopTime1 := kubelet.LatestLoopEntryTime()
if !loopTime1.IsZero() {
t.Errorf("Unexpected sync loop time: %s, expected 0", loopTime1)
}
// Start sync ticker.
syncCh := make(chan time.Time, 1)
housekeepingCh := make(chan time.Time, 1)
plegCh := make(chan *pleg.PodLifecycleEvent)
syncCh <- time.Now()
kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
loopTime2 := kubelet.LatestLoopEntryTime()
if loopTime2.IsZero() {
t.Errorf("Unexpected sync loop time: 0, expected non-zero value.")
}
syncCh <- time.Now()
kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
loopTime3 := kubelet.LatestLoopEntryTime()
if !loopTime3.After(loopTime1) {
t.Errorf("Sync Loop Time was not updated correctly. Second update timestamp should be greater than first update timestamp")
}
}
func TestSyncLoopAbort(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.runtimeState.setRuntimeSync(time.Now())
// The syncLoop waits on time.After(resyncInterval); set it very large so that we don't race with
// the channel close below.
kubelet.resyncInterval = time.Second * 30
ch := make(chan kubetypes.PodUpdate)
close(ch)
// sanity check (also prevent this test from hanging in the next step)
ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
if ok {
t.Fatalf("expected syncLoopIteration to return !ok since update chan was closed")
}
// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
kubelet.syncLoop(ch, kubelet)
}
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
pods := []*api.Pod{
podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
}),
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodSyncs(pods)
fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
ready := false
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "foo",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
}},
}
kubelet.HandlePodCleanups()
// Sources are not ready yet. Don't remove any pods.
fakeRuntime.AssertKilledPods([]string{})
ready = true
kubelet.HandlePodCleanups()
// Sources are ready. Remove unwanted pods.
fakeRuntime.AssertKilledPods([]string{"12345678"})
}
func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubelet.mounter = &mount.FakeMounter{}
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
},
},
})
stopCh := make(chan struct{})
go kubelet.volumeManager.Run(stopCh)
defer func() {
close(stopCh)
}()
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
}
}
if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
t.Errorf("Expected plugin NewAttacher to be called at least once")
}
err = volumetest.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyAttachCallCount(
1 /* expectedAttachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
}
func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubelet.mounter = &mount.FakeMounter{}
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
},
},
})
stopCh := make(chan struct{})
go kubelet.volumeManager.Run(stopCh)
defer func() {
close(stopCh)
}()
// Add pod
kubelet.podManager.SetPods([]*api.Pod{pod})
// Verify volumes attached
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
}
}
if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
t.Errorf("Expected plugin NewAttacher to be called at least once")
}
err = volumetest.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyAttachCallCount(
1 /* expectedAttachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
// Remove pod
kubelet.podManager.SetPods([]*api.Pod{})
err = waitForVolumeUnmount(kubelet.volumeManager, pod)
if err != nil {
t.Error(err)
}
// Verify volumes unmounted
podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
if len(podVolumes) != 0 {
t.Errorf("Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
}
err = volumetest.VerifyTearDownCallCount(
1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
// Verify volumes detached and no longer reported as in use
err = waitForVolumeDetach(kubelet.volumeManager)
if err != nil {
t.Error(err)
}
if testKubelet.volumePlugin.GetNewDetacherCallCount() < 1 {
t.Errorf("Expected plugin NewDetacher to be called at least once")
}
err = volumetest.VerifyDetachCallCount(
1 /* expectedDetachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
}
func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubelet.mounter = &mount.FakeMounter{}
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Status: api.NodeStatus{
VolumesAttached: []api.AttachedVolume{
{
Name: "fake/vol1",
DevicePath: "fake/path",
},
}},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
},
},
})
stopCh := make(chan struct{})
go kubelet.volumeManager.Run(stopCh)
defer func() {
close(stopCh)
}()
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
}
}
if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
t.Errorf("Expected plugin NewAttacher to be called at least once")
}
err = volumetest.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
}
func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubelet.mounter = &mount.FakeMounter{}
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Status: api.NodeStatus{
VolumesAttached: []api.AttachedVolume{
{
Name: "fake/vol1",
DevicePath: "fake/path",
},
}},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
},
},
})
stopCh := make(chan struct{})
go kubelet.volumeManager.Run(stopCh)
defer func() {
close(stopCh)
}()
// Add pod
kubelet.podManager.SetPods([]*api.Pod{pod})
// Verify volumes attached
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
}
}
if testKubelet.volumePlugin.GetNewAttacherCallCount() < 1 {
t.Errorf("Expected plugin NewAttacher to be called at least once")
}
err = volumetest.VerifyWaitForAttachCallCount(
1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifyMountDeviceCallCount(
1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
err = volumetest.VerifySetUpCallCount(
1 /* expectedSetUpCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
// Remove pod
kubelet.podManager.SetPods([]*api.Pod{})
err = waitForVolumeUnmount(kubelet.volumeManager, pod)
if err != nil {
t.Error(err)
}
// Verify volumes unmounted
podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
if len(podVolumes) != 0 {
t.Errorf("Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
}
err = volumetest.VerifyTearDownCallCount(
1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
// Verify volumes detached and no longer reported as in use
err = waitForVolumeDetach(kubelet.volumeManager)
if err != nil {
t.Error(err)
}
if testKubelet.volumePlugin.GetNewDetacherCallCount() < 1 {
t.Errorf("Expected plugin NewDetacher to be called at least once")
}
err = volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin)
if err != nil {
t.Error(err)
}
}
func TestPodVolumesExist(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
},
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "pod2",
UID: "pod2uid",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol2",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device2",
},
},
},
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "pod3",
UID: "pod3uid",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol3",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
PDName: "fake-device3",
},
},
},
},
},
},
}
stopCh := make(chan struct{})
go kubelet.volumeManager.Run(stopCh)
defer func() {
close(stopCh)
}()
kubelet.podManager.SetPods(pods)
for _, pod := range pods {
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
}
for _, pod := range pods {
podVolumesExist := kubelet.podVolumesExist(pod.UID)
if !podVolumesExist {
t.Errorf(
"Expected to find volumes for pod %q, but podVolumesExist returned false",
pod.UID)
}
}
}
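// stubVolume is a minimal volume.Mounter that reports a fixed path; SetUp and SetUpAt are no-ops.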
type stubVolume struct {
path string
volume.MetricsNil
}
func (f *stubVolume) GetPath() string {
return f.path
}
func (f *stubVolume) GetAttributes() volume.Attributes {
return volume.Attributes{}
}
func (f *stubVolume) SetUp(fsGroup *int64) error {
return nil
}
func (f *stubVolume) SetUpAt(dir string, fsGroup *int64) error {
return nil
}
func TestMakeVolumeMounts(t *testing.T) {
container := api.Container{
VolumeMounts: []api.VolumeMount{
{
MountPath: "/etc/hosts",
Name: "disk",
ReadOnly: false,
},
{
MountPath: "/mnt/path3",
Name: "disk",
ReadOnly: true,
},
{
MountPath: "/mnt/path4",
Name: "disk4",
ReadOnly: false,
},
{
MountPath: "/mnt/path5",
Name: "disk5",
ReadOnly: false,
},
},
}
podVolumes := kubecontainer.VolumeMap{
"disk": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}},
"disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}},
"disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}},
}
pod := api.Pod{
Spec: api.PodSpec{
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
},
}
mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes)
expectedMounts := []kubecontainer.Mount{
{
"disk",
"/etc/hosts",
"/mnt/disk",
false,
false,
},
{
"disk",
"/mnt/path3",
"/mnt/disk",
true,
false,
},
{
"disk4",
"/mnt/path4",
"/mnt/host",
false,
false,
},
{
"disk5",
"/mnt/path5",
"/var/lib/kubelet/podID/volumes/empty/disk5",
false,
false,
},
}
if !reflect.DeepEqual(mounts, expectedMounts) {
t.Errorf("Unexpected mounts: Expected %#v got %#v. Container was: %#v", expectedMounts, mounts, container)
}
}
func TestNodeIPParam(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
tests := []struct {
nodeIP string
success bool
testName string
}{
{
nodeIP: "",
success: true,
testName: "IP not set",
},
{
nodeIP: "127.0.0.1",
success: false,
testName: "loopback address",
},
{
nodeIP: "FE80::0202:B3FF:FE1E:8329",
success: false,
testName: "IPv6 address",
},
{
nodeIP: "1.2.3.4",
success: false,
testName: "IPv4 address that doesn't belong to host",
},
}
for _, test := range tests {
kubelet.nodeIP = net.ParseIP(test.nodeIP)
err := kubelet.validateNodeIP()
if err != nil && test.success {
t.Errorf("Test: %s, expected no error but got: %v", test.testName, err)
} else if err == nil && !test.success {
t.Errorf("Test: %s, expected an error", test.testName)
}
}
}
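// fakeContainerCommandRunner records the arguments of ExecInContainer and PortForward calls and returns a configurable error.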
type fakeContainerCommandRunner struct {
Cmd []string
ID kubecontainer.ContainerID
PodID types.UID
E error
Stdin io.Reader
Stdout io.WriteCloser
Stderr io.WriteCloser
TTY bool
Port uint16
Stream io.ReadWriteCloser
}
func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
f.Cmd = cmd
f.ID = id
f.Stdin = in
f.Stdout = out
f.Stderr = err
f.TTY = tty
return f.E
}
func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
f.PodID = pod.ID
f.Port = port
f.Stream = stream
return nil
}
func TestRunInContainerNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*containertest.FakePod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerName := "containerFoo"
output, err := kubelet.RunInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerName,
[]string{"ls"})
if output != nil {
t.Errorf("unexpected non-nil command: %v", output)
}
if err == nil {
t.Error("unexpected non-error")
}
}
func TestRunInContainer(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "podFoo",
Namespace: "nsFoo",
Containers: []*kubecontainer.Container{
{Name: "containerFoo",
ID: containerID,
},
},
}},
}
cmd := []string{"ls"}
_, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
if fakeCommandRunner.ID != containerID {
t.Errorf("unexpected Name: %s", fakeCommandRunner.ID)
}
if !reflect.DeepEqual(fakeCommandRunner.Cmd, cmd) {
t.Errorf("unexpected command: %s", fakeCommandRunner.Cmd)
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
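// countingDNSScrubber counts ScrubDNS invocations and passes nameservers and searches through unchanged.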
type countingDNSScrubber struct {
counter *int
}
func (cds countingDNSScrubber) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
(*cds.counter)++
return nameservers, searches
}
func TestParseResolvConf(t *testing.T) {
testCases := []struct {
data string
nameservers []string
searches []string
}{
{"", []string{}, []string{}},
{" ", []string{}, []string{}},
{"\n", []string{}, []string{}},
{"\t\n\t", []string{}, []string{}},
{"#comment\n", []string{}, []string{}},
{" #comment\n", []string{}, []string{}},
{"#comment\n#comment", []string{}, []string{}},
{"#comment\nnameserver", []string{}, []string{}},
{"#comment\nnameserver\nsearch", []string{}, []string{}},
{"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}},
{"search foo", []string{}, []string{"foo"}},
{"search foo bar", []string{}, []string{"foo", "bar"}},
{"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}},
{"search foo\nsearch bar", []string{}, []string{"bar"}},
{"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}},
{"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}},
{"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}},
}
for i, tc := range testCases {
ns, srch, err := parseResolvConf(strings.NewReader(tc.data), nil)
if err != nil {
t.Errorf("expected success, got %v", err)
continue
}
if !reflect.DeepEqual(ns, tc.nameservers) {
t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
}
if !reflect.DeepEqual(srch, tc.searches) {
t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
}
counter := 0
cds := countingDNSScrubber{&counter}
ns, srch, err = parseResolvConf(strings.NewReader(tc.data), cds)
if err != nil {
t.Errorf("expected success, got %v", err)
continue
}
if !reflect.DeepEqual(ns, tc.nameservers) {
t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
}
if !reflect.DeepEqual(srch, tc.searches) {
t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
}
if counter != 1 {
t.Errorf("[%d] expected dnsScrubber to have been called: got %d", i, counter)
}
}
}
func TestDNSConfigurationParams(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
clusterNS := "203.0.113.1"
kubelet.clusterDomain = "kubernetes.io"
kubelet.clusterDNS = net.ParseIP(clusterNS)
pods := newTestPods(2)
pods[0].Spec.DNSPolicy = api.DNSClusterFirst
pods[1].Spec.DNSPolicy = api.DNSDefault
options := make([]*kubecontainer.RunContainerOptions, 2)
for i, pod := range pods {
var err error
options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "")
if err != nil {
t.Fatalf("failed to generate container options: %v", err)
}
}
if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
}
if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
}
if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
}
if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
}
kubelet.resolverConfig = "/etc/resolv.conf"
for i, pod := range pods {
var err error
options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "")
if err != nil {
t.Fatalf("failed to generate container options: %v", err)
}
}
t.Logf("nameservers %+v", options[1].DNS)
if len(options[0].DNS) != 1 {
t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS)
} else if options[0].DNS[0] != clusterNS {
t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
}
if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 {
t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
} else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
}
}
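// testServiceLister serves a fixed list of services.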
type testServiceLister struct {
services []api.Service
}
func (ls testServiceLister) List() (api.ServiceList, error) {
return api.ServiceList{
Items: ls.services,
}, nil
}
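// testNodeLister serves a fixed list of nodes.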
type testNodeLister struct {
nodes []api.Node
}
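// testNodeInfo looks up a node by name from a fixed list.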
type testNodeInfo struct {
nodes []api.Node
}
func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
for _, node := range ls.nodes {
if node.Name == id {
return &node, nil
}
}
return nil, fmt.Errorf("Node with name: %s does not exist", id)
}
func (ls testNodeLister) List() (api.NodeList, error) {
return api.NodeList{
Items: ls.nodes,
}, nil
}
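// envs implements sort.Interface so expected and actual environment variables can be compared order-independently.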
type envs []kubecontainer.EnvVar
func (e envs) Len() int {
return len(e)
}
func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
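// buildService returns a Service with a single port using the given protocol and cluster IP.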
func buildService(name, namespace, clusterIP, protocol string, port int) api.Service {
return api.Service{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: api.Protocol(protocol),
Port: int32(port),
}},
ClusterIP: clusterIP,
},
}
}
func TestMakeEnvironmentVariables(t *testing.T) {
services := []api.Service{
buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081),
buildService("test", "test1", "1.2.3.3", "TCP", 8083),
buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
buildService("test", "test2", "1.2.3.5", "TCP", 8085),
buildService("test", "test2", "None", "TCP", 8085),
buildService("test", "test2", "", "TCP", 8085),
buildService("kubernetes", "kubernetes", "1.2.3.6", "TCP", 8086),
buildService("not-special", "kubernetes", "1.2.3.8", "TCP", 8088),
buildService("not-special", "kubernetes", "None", "TCP", 8088),
buildService("not-special", "kubernetes", "", "TCP", 8088),
}
testCases := []struct {
name string // the name of the test case
ns string // the namespace to generate environment for
container *api.Container // the container to use
masterServiceNs string // the namespace to read master service info from
nilLister bool // whether the lister should be nil
expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars
}{
{
name: "api server = Y, kubelet = Y",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "api server = Y, kubelet = N",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
{
name: "api server = N; kubelet = Y",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAZ"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAZ"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "master service in pod ns",
ns: "test2",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "ZAP"},
},
},
masterServiceNs: "kubernetes",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "ZAP"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
{Name: "TEST_SERVICE_PORT", Value: "8085"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
{Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
},
},
{
name: "pod in master service ns",
ns: "kubernetes",
container: &api.Container{},
masterServiceNs: "kubernetes",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
{Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8086"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"},
{Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"},
{Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"},
{Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"},
},
},
{
name: "downward api pod",
ns: "downward-api",
container: &api.Container{
Env: []api.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "metadata.namespace",
},
},
},
{
Name: "POD_IP",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "status.podIP",
},
},
},
},
},
masterServiceNs: "nothing",
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "POD_NAME", Value: "dapi-test-pod-name"},
{Name: "POD_NAMESPACE", Value: "downward-api"},
{Name: "POD_IP", Value: "1.2.3.4"},
},
},
{
name: "env expansion",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Default.GroupVersion().String(),
FieldPath: "metadata.name",
},
},
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-$(EMPTY_VAR)",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-$(POD_NAME)",
},
{
Name: "POD_NAME_TEST3",
Value: "$(POD_NAME_TEST2)-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-$(TEST_LITERAL)",
},
{
Name: "SERVICE_VAR_TEST",
Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
},
},
masterServiceNs: "nothing",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
Value: "dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST3",
Value: "test2-dapi-test-pod-name-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-test-test-test",
},
{
Name: "TEST_SERVICE_HOST",
Value: "1.2.3.3",
},
{
Name: "TEST_SERVICE_PORT",
Value: "8083",
},
{
Name: "TEST_PORT",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP_PROTO",
Value: "tcp",
},
{
Name: "TEST_PORT_8083_TCP_PORT",
Value: "8083",
},
{
Name: "TEST_PORT_8083_TCP_ADDR",
Value: "1.2.3.3",
},
{
Name: "SERVICE_VAR_TEST",
Value: "1.2.3.3:8083",
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-",
},
},
},
}
for i, tc := range testCases {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
kl.masterServiceNamespace = tc.masterServiceNs
if tc.nilLister {
kl.serviceLister = nil
} else {
kl.serviceLister = testServiceLister{services}
}
testPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Namespace: tc.ns,
Name: "dapi-test-pod-name",
},
}
podIP := "1.2.3.4"
result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP)
if err != nil {
t.Errorf("[%v] Unexpected error: %v", tc.name, err)
}
sort.Sort(envs(result))
sort.Sort(envs(tc.expectedEnvs))
if !reflect.DeepEqual(result, tc.expectedEnvs) {
t.Errorf("%d: [%v] Unexpected env entries; expected {%v}, got {%v}", i, tc.name, tc.expectedEnvs, result)
}
}
}
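// The helpers below build api.ContainerStatus values in specific states for the pod phase tests.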
func waitingState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
}
}
func waitingStateWithLastTermination(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
LastTerminationState: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func runningState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
}
func stoppedState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
}
}
func succeededState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func failedState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: -1,
},
},
}
}
func TestPodPhaseWithRestartAlways(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyAlways,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
stoppedState("containerA"),
stoppedState("containerB"),
},
},
},
api.PodRunning,
"all stopped with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
stoppedState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
api.PodPending,
"mixed state #3 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingStateWithLastTermination("containerB"),
},
},
},
api.PodRunning,
"backoff crashloop container with restart always",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestPodPhaseWithRestartNever(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyNever,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
api.PodSucceeded,
"all succeeded with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
api.PodFailed,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
api.PodPending,
"mixed state #3 with restart never",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestPodPhaseWithRestartOnFailure(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyOnFailure,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
api.PodSucceeded,
"all succeeded with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
api.PodRunning,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingState("containerB"),
},
},
},
api.PodPending,
"mixed state #3 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
waitingStateWithLastTermination("containerB"),
},
},
},
api.PodRunning,
"backoff crashloop container with restart onfailure",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
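// TestExecInContainerNoSuchPod verifies that ExecInContainer returns an error
// and never invokes the command runner when the target pod does not exist.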
func TestExecInContainerNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
fakeRuntime.PodList = []*containertest.FakePod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerID,
[]string{"ls"},
nil,
nil,
nil,
false,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
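// TestExecInContainerNoSuchContainer verifies that ExecInContainer returns an
// error and never invokes the command runner when the pod exists but the
// requested container does not.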
func TestExecInContainerNoSuchContainer(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: kubecontainer.ContainerID{Type: "test", ID: "barID"}},
},
}},
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
containerID,
[]string{"ls"},
nil,
nil,
nil,
false,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
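// fakeReadWriteCloser is a no-op io.ReadWriteCloser used as a stand-in for the
// exec and port-forward streams handed to the fake command runner.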
type fakeReadWriteCloser struct{}
func (f *fakeReadWriteCloser) Write(data []byte) (int, error) {
return 0, nil
}
func (f *fakeReadWriteCloser) Read(data []byte) (int, error) {
return 0, nil
}
func (f *fakeReadWriteCloser) Close() error {
return nil
}
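// TestExecInContainer verifies that ExecInContainer forwards the container ID,
// command, streams, and TTY flag to the underlying command runner.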
func TestExecInContainer(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
command := []string{"ls"}
stdin := &bytes.Buffer{}
stdout := &fakeReadWriteCloser{}
stderr := &fakeReadWriteCloser{}
tty := true
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: containerID,
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
},
},
}},
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)),
"",
containerID,
[]string{"ls"},
stdin,
stdout,
stderr,
tty,
)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := containerID, fakeCommandRunner.ID.ID; e != a {
t.Fatalf("container name: expected %q, got %q", e, a)
}
if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) {
t.Fatalf("command: expected '%v', got '%v'", e, a)
}
if e, a := stdin, fakeCommandRunner.Stdin; e != a {
t.Fatalf("stdin: expected %#v, got %#v", e, a)
}
if e, a := stdout, fakeCommandRunner.Stdout; e != a {
t.Fatalf("stdout: expected %#v, got %#v", e, a)
}
if e, a := stderr, fakeCommandRunner.Stderr; e != a {
t.Fatalf("stderr: expected %#v, got %#v", e, a)
}
if e, a := tty, fakeCommandRunner.TTY; e != a {
t.Fatalf("tty: expected %t, got %t", e, a)
}
}
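// TestPortForwardNoSuchPod verifies that PortForward returns an error and never
// invokes the command runner when the target pod does not exist.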
func TestPortForwardNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*containertest.FakePod{}
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
var port uint16 = 5000
err := kubelet.PortForward(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
port,
nil,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if !fakeCommandRunner.ID.IsEmpty() {
t.Fatal("unexpected invocation of runner.PortForward")
}
}
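// TestPortForward verifies that PortForward passes the pod ID, port, and stream
// through to the underlying command runner.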
func TestPortForward(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
podName := "podFoo"
podNamespace := "nsFoo"
podID := types.UID("12345678")
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: podID,
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: kubecontainer.ContainerID{Type: "test", ID: "containerFoo"},
},
},
}},
}
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
var port uint16 = 5000
stream := &fakeReadWriteCloser{}
err := kubelet.PortForward(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
port,
stream,
)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := podID, fakeCommandRunner.PodID; e != a {
t.Fatalf("pod id: expected %q, got %q", e, a)
}
if e, a := port, fakeCommandRunner.Port; e != a {
t.Fatalf("port: expected %v, got %v", e, a)
}
if e, a := stream, fakeCommandRunner.Stream; e != a {
t.Fatalf("stream: expected %v, got %v", e, a)
}
}
// Tests that host port conflicts are detected correctly.
func TestGetHostPortConflicts(t *testing.T) {
pods := []*api.Pod{
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}},
}
// Pods should not cause any conflict.
if hasHostPortConflicts(pods) {
t.Errorf("expected no conflicts, got conflicts")
}
expected := &api.Pod{
Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
}
// The new pod should cause conflict and be reported.
pods = append(pods, expected)
if !hasHostPortConflicts(pods) {
t.Errorf("expected conflict, got no conflicts")
}
}
// Tests that pods with conflicting host ports are handled correctly by setting the failed status in the status map.
func TestHandlePortConflicts(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl.nodeLister = testNodeLister{nodes: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
kl.nodeInfo = testNodeInfo{nodes: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
pods := []*api.Pod{
podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
}
if status.Phase != api.PodPending {
t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
}
}
// Tests that pods whose NodeName does not match the kubelet's host name are rejected by setting the failed status in the status map.
func TestHandleHostNameConflicts(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl.nodeLister = testNodeLister{nodes: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
kl.nodeInfo = testNodeInfo{nodes: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
// default NodeName in test is 127.0.0.1
pods := []*api.Pod{
podWithUidNameNsSpec("123456789", "notfittingpod", "foo", api.PodSpec{NodeName: "127.0.0.2"}),
podWithUidNameNsSpec("987654321", "fittingpod", "foo", api.PodSpec{NodeName: "127.0.0.1"}),
}
notfittingPod := pods[0]
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
}
if status.Phase != api.PodPending {
t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
}
}
// Tests that pods whose node selector does not match the node's labels are rejected by setting the failed status in the status map.
func TestHandleNodeSelector(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
nodes := []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}
kl.nodeLister = testNodeLister{nodes: nodes}
kl.nodeInfo = testNodeInfo{nodes: nodes}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*api.Pod{
podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
podWithUidNameNsSpec("987654321", "podB", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
}
// The first pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
}
if status.Phase != api.PodPending {
t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
}
}
// Tests that pods requesting more memory than the node's allocatable are rejected by setting the failed status in the status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
nodes := []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
}}},
}
kl.nodeLister = testNodeLister{nodes: nodes}
kl.nodeInfo = testNodeInfo{nodes: nodes}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
spec := api.PodSpec{NodeName: kl.nodeName,
Containers: []api.Container{{Resources: api.ResourceRequirements{
Requests: api.ResourceList{
"memory": resource.MustParse("90"),
},
}}}}
pods := []*api.Pod{
podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
notfittingPod := pods[0]
fittingPod := pods[1]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// notfittingPod should be Failed
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
// fittingPod should be Pending
status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", fittingPod.UID)
}
if status.Phase != api.PodPending {
t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
}
}
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without an external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
testKubelet.fakeCadvisor.On("VersionInfo").Return(versionInfo, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
{ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
}
podToTest := pods[1]
// Run once to populate the status map.
kl.HandlePodAdditions(pods)
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
t.Fatalf("expected to have status cached for pod2")
}
// Sync with an empty pod list so that the entry in the status map is removed.
kl.podManager.SetPods([]*api.Pod{})
kl.HandlePodCleanups()
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
t.Fatalf("expected to not have status cached for pod2")
}
}
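// TestValidateContainerLogStatus verifies that container logs are only served
// when the container's current state (or previous state, when requested) makes
// logs available, and that unknown container names are rejected.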
func TestValidateContainerLogStatus(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
containerName := "x"
testCases := []struct {
statuses []api.ContainerStatus
success bool
}{
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
LastTerminationState: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
},
},
success: false,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
},
},
success: false,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
},
},
success: false,
},
}
for i, tc := range testCases {
_, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: tc.statuses,
}, containerName, false)
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, "blah", false); err == nil {
t.Errorf("expected error with invalid container name")
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, true); err != nil {
t.Errorf("unexpected error for previous terminated container - %v", err)
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, false); err != nil {
t.Errorf("unexpected error for most recent container - %v", err)
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, true); err == nil {
t.Errorf("expected error for previous terminated container")
}
if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, false); err != nil {
t.Errorf("unexpected error for most recent container")
}
}
// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager, along
// with the mock FsInfo values added to Cadvisor, should make the kubelet report that it has
// sufficient disk space or that it is out of disk, depending on the capacity, availability, and
// threshold values.
func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, rootCap, dockerCap, rootAvail, dockerAvail uint64, rootThreshold, dockerThreshold int) error {
dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: rootCap * mb, Available: rootAvail * mb}
rootFsInfo := cadvisorapiv2.FsInfo{Capacity: dockerCap * mb, Available: dockerAvail * mb}
mockCadvisor.On("ImagesFsInfo").Return(dockerimagesFsInfo, nil)
mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)
dsp := DiskSpacePolicy{DockerFreeDiskMB: rootThreshold, RootFreeDiskMB: dockerThreshold}
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
if err != nil {
return err
}
kubelet.diskSpaceManager = diskSpaceManager
return nil
}
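// TestUpdateNewNodeStatus verifies the initial node status posted by the
// kubelet, including conditions, machine info, capacity, allocatable resources,
// and the image list capped at maxImagesInNodeStatus.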
func TestUpdateNewNodeStatus(t *testing.T) {
// generate one more than maxImagesInNodeStatus in inputImageList
inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
testKubelet := newTestKubeletWithImageList(
t, inputImageList, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10E9, // 10G
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make kubelet report that it has sufficient disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OSImage: "Debian GNU/Linux 7 (wheezy)",
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
Images: expectedImageList,
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
} else {
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
}
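// TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency verifies that a new
// node immediately reports NodeOutOfDisk=false when disk space is sufficient,
// even with an outOfDiskTransitionFrequency configured.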
func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make Kubelet report that it has sufficient disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
kubelet.outOfDiskTransitionFrequency = 10 * time.Second
expectedNodeOutOfDiskCondition := api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
var oodCondition api.NodeCondition
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
if cond.Type == api.NodeOutOfDisk {
oodCondition = updatedNode.Status.Conditions[i]
}
}
if !reflect.DeepEqual(expectedNodeOutOfDiskCondition, oodCondition) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNodeOutOfDiskCondition, oodCondition))
}
}
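// TestUpdateExistingNodeStatus verifies that updateNodeStatus refreshes the
// conditions, resources, and image list of a node object that already exists
// in the apiserver.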
func TestUpdateExistingNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
},
},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 20E9,
}
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make kubelet report that it is out of disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.Time{}, // placeholder
LastTransitionTime: unversioned.Time{}, // placeholder
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.Time{}, // placeholder
LastTransitionTime: unversioned.Time{}, // placeholder
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OSImage: "Debian GNU/Linux 7 (wheezy)",
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
// images will be sorted from max to min in node status.
Images: []api.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
SizeBytes: 123,
},
},
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Errorf("unexpected actions: %v", actions)
}
updateAction, ok := actions[1].(core.UpdateAction)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
updatedNode, ok := updateAction.GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
for i, cond := range updatedNode.Status.Conditions {
// Expect LastProbeTime to be updated to Now, while LastTransitionTime remains the same.
if old := unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, unversioned.Now(), old)
}
if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
}
}
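// TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency verifies that
// the NodeOutOfDisk condition only transitions back to false after the
// configured transition frequency has elapsed.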
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
clock := testKubelet.fakeClock
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.NewTime(clock.Now()),
LastTransitionTime: unversioned.NewTime(clock.Now()),
},
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.NewTime(clock.Now()),
LastTransitionTime: unversioned.NewTime(clock.Now()),
},
},
},
},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor.On("Start").Return(nil)
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
kubelet.outOfDiskTransitionFrequency = 5 * time.Second
ood := api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder
LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder
}
noOod := api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder
LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder
}
testCases := []struct {
rootFsAvail uint64
dockerFsAvail uint64
expected api.NodeCondition
}{
{
// Sufficient disk space, but the condition should not transition to false before the transition frequency elapses.
rootFsAvail: 200,
dockerFsAvail: 200,
expected: ood,
},
{
// Out of disk, so the condition remains true.
rootFsAvail: 50,
dockerFsAvail: 200,
expected: ood,
},
{
// Sufficient disk space again, still within the transition frequency window.
rootFsAvail: 200,
dockerFsAvail: 200,
expected: ood,
},
{
// Out of disk again, so the condition remains true.
rootFsAvail: 200,
dockerFsAvail: 50,
expected: ood,
},
{
// Sufficient disk space and the transition frequency has elapsed, so the condition transitions to false.
rootFsAvail: 200,
dockerFsAvail: 200,
expected: noOod,
},
}
kubelet.updateRuntimeUp()
for tcIdx, tc := range testCases {
// Step by a second
clock.Step(1 * time.Second)
// Setup expected times.
tc.expected.LastHeartbeatTime = unversioned.NewTime(clock.Now())
// In the last case, there should be a status transition for NodeOutOfDisk
if tcIdx == len(testCases)-1 {
tc.expected.LastTransitionTime = unversioned.NewTime(clock.Now())
}
// Make kubelet report that it has sufficient disk space
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Errorf("%d. unexpected actions: %v", tcIdx, actions)
}
updateAction, ok := actions[1].(core.UpdateAction)
if !ok {
t.Errorf("%d. unexpected action type. expected UpdateAction, got %#v", tcIdx, actions[1])
}
updatedNode, ok := updateAction.GetObject().(*api.Node)
if !ok {
t.Errorf("%d. unexpected object type", tcIdx)
}
kubeClient.ClearActions()
var oodCondition api.NodeCondition
for i, cond := range updatedNode.Status.Conditions {
if cond.Type == api.NodeOutOfDisk {
oodCondition = updatedNode.Status.Conditions[i]
}
}
if !reflect.DeepEqual(tc.expected, oodCondition) {
t.Errorf("%d.\nwant \n%v\n, got \n%v", tcIdx, tc.expected, oodCondition)
}
}
}
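// TestUpdateNodeStatusWithRuntimeStateError verifies that the NodeReady
// condition reflects container runtime health: stale or failing runtime checks
// mark the node NotReady.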
func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
clock := testKubelet.fakeClock
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10E9,
}
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
// Make kubelet report that it has sufficient disk space.
if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
t.Fatalf("can't update disk space manager: %v", err)
}
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{}, //placeholder
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OSImage: "Debian GNU/Linux 7 (wheezy)",
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
Images: []api.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
SizeBytes: 123,
},
},
},
}
checkNodeStatus := func(status api.ConditionStatus, reason, message string) {
kubeClient.ClearActions()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp")
}
if cond.LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp")
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
lastIndex := len(updatedNode.Status.Conditions) - 1
if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{
Type: api.NodeReady,
Status: status,
Reason: reason,
Message: message,
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
}
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
readyMessage := "kubelet is posting ready status"
downMessage := "container runtime is down"
// Should report kubelet not ready if the runtime check is out of date
clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
// Should report kubelet ready if the runtime check is updated
clock.SetTime(time.Now())
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionTrue, "KubeletReady", readyMessage)
// Should report kubelet not ready if the runtime check is out of date
clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
// Should report kubelet not ready if the runtime check failed
fakeRuntime := testKubelet.fakeRuntime
// Inject error into fake runtime status check, node should be NotReady
fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
clock.SetTime(time.Now())
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady", downMessage)
}
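// TestUpdateNodeStatusError verifies that updateNodeStatus fails and issues
// nodeStatusUpdateRetry attempts when no matching node exists in the apiserver.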
func TestUpdateNodeStatusError(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{}}).ReactionChain
if err := kubelet.updateNodeStatus(); err == nil {
t.Errorf("unexpected non-error from updateNodeStatus")
}
if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
}
}
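// TestCreateMirrorPod verifies that syncing a static pod creates a
// corresponding mirror pod for both the create and update sync types.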
func TestCreateMirrorPod(t *testing.T) {
for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := podWithUidNameNs("12345678", "bar", "foo")
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
pods := []*api.Pod{pod}
kl.podManager.SetPods(pods)
err := kl.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: updateType,
})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
podFullName := kubecontainer.GetPodFullName(pod)
if !manager.HasPod(podFullName) {
t.Errorf("expected mirror pod %q to be created", podFullName)
}
if manager.NumOfPods() != 1 || !manager.HasPod(podFullName) {
t.Errorf("expected one mirror pod %q, got %v", podFullName, manager.GetPods())
}
}
}
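// TestDeleteOutdatedMirrorPod verifies that a mirror pod whose spec no longer
// matches the static pod is deleted and recreated on sync.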
func TestDeleteOutdatedMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := podWithUidNameNsSpec("12345678", "foo", "ns", api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "foo"},
},
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
// Mirror pod has an outdated spec.
mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "bar"},
},
})
mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
pods := []*api.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
err := kl.syncPod(syncPodOptions{
pod: pod,
mirrorPod: mirrorPod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 1 || deletes != 1 {
t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
}
}
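// TestDeleteOrphanedMirrorPods verifies that mirror pods without a
// corresponding static pod are deleted during pod cleanup.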
func TestDeleteOrphanedMirrorPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
orphanPods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
},
}
kl.podManager.SetPods(orphanPods)
// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups()
if manager.NumOfPods() != 0 {
t.Errorf("expected zero mirror pods, got %v", manager.GetPods())
}
for _, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
}
}
}
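// TestGetContainerInfoForMirrorPods verifies that container stats can be
// retrieved using the mirror pod's UID in place of the static pod's UID.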
func TestGetContainerInfoForMirrorPods(t *testing.T) {
// pods contain one static and one mirror pod with the same name but
// different UIDs.
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "1234",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "5678",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
}
containerID := "ab2cdf"
containerPath := fmt.Sprintf("/docker/%v", containerID)
containerInfo := cadvisorapi.ContainerInfo{
ContainerReference: cadvisorapi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
fakeRuntime := testKubelet.fakeRuntime
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
kubelet := testKubelet.kubelet
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
},
},
}},
}
kubelet.podManager.SetPods(pods)
// Use the mirror pod UID to retrieve the stats.
stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if stats == nil {
t.Fatalf("stats should not be nil")
}
mockCadvisor.AssertExpectations(t)
}
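// TestHostNetworkAllowed verifies that a host-network pod syncs successfully
// when its config source is allowed to use host networking.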
func TestHostNetworkAllowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
},
})
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
}
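// TestHostNetworkDisallowed verifies that a host-network pod fails to sync
// when no config source is allowed to use host networking.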
func TestHostNetworkDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{},
},
})
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
})
pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
err := kubelet.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
}
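// TestPrivilegeContainerAllowed verifies that a pod with a privileged
// container syncs successfully when privileged containers are allowed.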
func TestPrivilegeContainerAllowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: true,
})
privileged := true
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
2015-05-09 21:17:36 +00:00
},
})
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
}
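// TestPrivilegeContainerDisallowed verifies that a pod with a privileged
// container fails to sync when privileged containers are disallowed.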
func TestPrivilegeContainerDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: false,
})
privileged := true
pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
2015-05-09 21:17:36 +00:00
},
})
err := kubelet.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodUpdate,
})
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
}
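// TestFilterOutTerminatedPods verifies that failed and succeeded pods are filtered out, while
// running, pending, and phase-less pods are kept.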
func TestFilterOutTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
pods[0].Status.Phase = api.PodFailed
pods[1].Status.Phase = api.PodSucceeded
pods[2].Status.Phase = api.PodRunning
pods[3].Status.Phase = api.PodPending
expected := []*api.Pod{pods[2], pods[3], pods[4]}
kubelet.podManager.SetPods(pods)
actual := kubelet.filterOutTerminatedPods(pods)
if !reflect.DeepEqual(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
}
}
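// TestRegisterExistingNodeWithApiserver verifies that registration completes when the apiserver
// reports that the node already exists and returns a matching node on get.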
func TestRegisterExistingNodeWithApiserver(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an error on create.
return true, &api.Node{}, &apierrors.StatusError{
ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists},
}
})
kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an existing (matching) node on get.
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400 * mb,
Capacity: 1000 * mb,
Available: 600 * mb,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 9 * mb,
Capacity: 10 * mb,
}, nil)
done := make(chan struct{})
go func() {
kubelet.registerWithApiserver()
done <- struct{}{}
}()
select {
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("timed out waiting for registration")
case <-done:
return
}
}
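// TestMakePortMappings verifies that makePortMappings builds port mappings from a container spec
// and skips duplicate names and duplicate protocol/port pairs.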
func TestMakePortMappings(t *testing.T) {
port := func(name string, protocol api.Protocol, containerPort, hostPort int32, ip string) api.ContainerPort {
return api.ContainerPort{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
HostIP: ip,
}
}
portMapping := func(name string, protocol api.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping {
return kubecontainer.PortMapping{
Name: name,
Protocol: protocol,
ContainerPort: containerPort,
HostPort: hostPort,
HostIP: ip,
}
}
tests := []struct {
container *api.Container
expectedPortMappings []kubecontainer.PortMapping
}{
{
&api.Container{
Name: "fooContainer",
Ports: []api.ContainerPort{
port("", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
port("", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
port("foo", api.ProtocolUDP, 555, 5555, ""),
// Duplicated, should be ignored.
port("foo", api.ProtocolUDP, 888, 8888, ""),
// Duplicated, should be ignored.
port("", api.ProtocolTCP, 80, 8888, ""),
},
},
[]kubecontainer.PortMapping{
portMapping("fooContainer-TCP:80", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
portMapping("fooContainer-TCP:443", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
portMapping("fooContainer-foo", api.ProtocolUDP, 555, 5555, ""),
},
},
}
for i, tt := range tests {
actual := makePortMappings(tt.container)
if !reflect.DeepEqual(tt.expectedPortMappings, actual) {
t.Errorf("%d: Expected: %#v, saw: %#v", i, tt.expectedPortMappings, actual)
}
}
}
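// TestIsPodPastActiveDeadline verifies that pastActiveDeadline returns true only for pods whose
// ActiveDeadlineSeconds has been exceeded.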
func TestIsPodPastActiveDeadline(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
exceededActiveDeadlineSeconds := int64(30)
notYetActiveDeadlineSeconds := int64(120)
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
tests := []struct {
pod *api.Pod
expected bool
}{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}, {pods[4], false}}
kubelet.podManager.SetPods(pods)
for i, tt := range tests {
actual := kubelet.pastActiveDeadline(tt.pod)
if actual != tt.expected {
t.Errorf("[%d] expected %#v, got %#v", i, tt.expected, actual)
}
}
}
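// TestSyncPodsSetStatusToFailedForPodsThatRunTooLong verifies that a pod past its active deadline
// is marked Failed on update.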
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(30)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: api.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
}},
}
	// Let the pod workers set the status to failed after this sync.
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
if !found {
t.Errorf("expected to found status for pod %q", pods[0].UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q, ot %q.", api.PodFailed, status.Phase)
}
}
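// TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed verifies that a pod still within its
// active deadline is not marked Failed on update.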
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
now := unversioned.Now()
startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(300)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: api.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
}},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
if !found {
t.Errorf("expected to found status for pod %q", pods[0].UID)
}
if status.Phase == api.PodFailed {
t.Fatalf("expected pod status to not be %q", status.Phase)
}
}
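// podWithUidNameNs returns a pod with the given UID, name, and namespace, and an empty (non-nil)
// annotations map.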
func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: uid,
Name: name,
Namespace: namespace,
Annotations: map[string]string{},
},
}
}
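// podWithUidNameNsSpec returns a pod with the given UID, name, namespace, and spec.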
func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod {
pod := podWithUidNameNs(uid, name, namespace)
pod.Spec = spec
return pod
}
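// TestDeletePodDirsForDeletedPods verifies that the pod directory is removed once the pod has been
// deleted from the pod manager.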
func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
podWithUidNameNs("12345678", "pod1", "ns"),
podWithUidNameNs("12345679", "pod2", "ns"),
}
kl.podManager.SetPods(pods)
// Sync to create pod directories.
kl.HandlePodSyncs(kl.podManager.GetPods())
for i := range pods {
if !dirExists(kl.getPodDir(pods[i].UID)) {
t.Errorf("expected directory to exist for pod %d", i)
}
}
// Pod 1 has been deleted and no longer exists.
kl.podManager.SetPods([]*api.Pod{pods[0]})
kl.HandlePodCleanups()
if !dirExists(kl.getPodDir(pods[0].UID)) {
t.Errorf("expected directory to exist for pod 0")
}
if dirExists(kl.getPodDir(pods[1].UID)) {
t.Errorf("expected directory to be deleted for pod 1")
}
}
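// syncAndVerifyPodDir syncs the given pods, runs pod cleanup, and asserts whether the pod
// directories for podsToCheck exist.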
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) {
kl := testKubelet.kubelet
kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
kl.HandlePodCleanups()
for i, pod := range podsToCheck {
exist := dirExists(kl.getPodDir(pod.UID))
if shouldExist && !exist {
t.Errorf("expected directory to exist for pod %d", i)
} else if !shouldExist && exist {
t.Errorf("expected directory to be removed for pod %d", i)
}
}
}
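// TestDoesNotDeletePodDirsForTerminatedPods verifies that pod directories are kept for terminated
// pods that are still known to the pod manager.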
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
podWithUidNameNs("12345678", "pod1", "ns"),
podWithUidNameNs("12345679", "pod2", "ns"),
podWithUidNameNs("12345680", "pod3", "ns"),
}
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
// deleted.
kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed})
kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded})
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}
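// TestDoesNotDeletePodDirsIfContainerIsRunning verifies that a deleted pod's directory is kept
// while the pod is still running on the node, and removed once it is not.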
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("Start").Return(nil)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
runningPod := &kubecontainer.Pod{
ID: "12345678",
Name: "pod1",
Namespace: "ns",
}
apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
// Sync once to create pod directory; confirm that the pod directory has
// already been created.
pods := []*api.Pod{apiPod}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
// Pretend the pod is deleted from apiserver, but is still active on the node.
// The pod directory should not be removed.
pods = []*api.Pod{}
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{runningPod, ""}}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
// The pod is deleted and also not active on the node. The pod directory
// should be removed.
pods = []*api.Pod{}
testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
}
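// TestCleanupBandwidthLimits verifies that shaper CIDRs are reset unless they belong to a running
// pod with an ingress bandwidth annotation.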
func TestCleanupBandwidthLimits(t *testing.T) {
testPod := func(name, ingress string) *api.Pod {
pod := podWithUidNameNs("", name, "")
if len(ingress) != 0 {
pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
}
return pod
}
// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
// layer status getter function and test that function instead.
tests := []struct {
status *api.PodStatus
pods []*api.Pod
inputCIDRs []string
expectResetCIDRs []string
name string
}{
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
testPod("foo", "10M"),
testPod("bar", ""),
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
name: "pod running",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
},
pods: []*api.Pod{
testPod("foo", "10M"),
testPod("bar", ""),
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
name: "pod not running",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
},
pods: []*api.Pod{
testPod("foo", ""),
testPod("bar", ""),
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
name: "no bandwidth limits",
},
}
for _, test := range tests {
shaper := &bandwidth.FakeShaper{
CIDRs: test.inputCIDRs,
}
testKube := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKube.kubelet.shaper = shaper
for _, pod := range test.pods {
testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
}
err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
if err != nil {
t.Errorf("unexpected error: %v (%s)", test.name, err)
}
if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
}
}
}
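// TestExtractBandwidthResources verifies parsing of the ingress and egress bandwidth annotations
// into resource quantities.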
func TestExtractBandwidthResources(t *testing.T) {
four, _ := resource.ParseQuantity("4M")
ten, _ := resource.ParseQuantity("10M")
twenty, _ := resource.ParseQuantity("20M")
testPod := func(ingress, egress string) *api.Pod {
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Annotations: map[string]string{}}}
if len(ingress) != 0 {
pod.Annotations["kubernetes.io/ingress-bandwidth"] = ingress
}
if len(egress) != 0 {
pod.Annotations["kubernetes.io/egress-bandwidth"] = egress
}
return pod
}
tests := []struct {
pod *api.Pod
expectedIngress *resource.Quantity
expectedEgress *resource.Quantity
expectError bool
}{
{
pod: &api.Pod{},
},
{
pod: testPod("10M", ""),
expectedIngress: &ten,
},
{
pod: testPod("", "10M"),
expectedEgress: &ten,
},
{
pod: testPod("4M", "20M"),
expectedIngress: &four,
expectedEgress: &twenty,
},
{
pod: testPod("foo", ""),
expectError: true,
},
}
for _, test := range tests {
ingress, egress, err := bandwidth.ExtractPodBandwidthResources(test.pod.Annotations)
if test.expectError {
if err == nil {
t.Errorf("unexpected non-error")
}
continue
}
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !reflect.DeepEqual(ingress, test.expectedIngress) {
t.Errorf("expected: %v, saw: %v", ingress, test.expectedIngress)
}
if !reflect.DeepEqual(egress, test.expectedEgress) {
t.Errorf("expected: %v, saw: %v", egress, test.expectedEgress)
}
}
}
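// TestGetPodsToSync verifies that getPodsToSync returns pods that are past their active deadline
// or are due in the work queue.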
func TestGetPodsToSync(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
clock := testKubelet.fakeClock
pods := newTestPods(5)
exceededActiveDeadlineSeconds := int64(30)
notYetActiveDeadlineSeconds := int64(120)
startTime := unversioned.NewTime(clock.Now())
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
pods[2].Status.StartTime = &startTime
pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
kubelet.podManager.SetPods(pods)
kubelet.workQueue.Enqueue(pods[2].UID, 0)
kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)
clock.Step(1 * time.Minute)
expectedPods := []*api.Pod{pods[0], pods[2], pods[3]}
podsToSync := kubelet.getPodsToSync()
if len(podsToSync) == len(expectedPods) {
for _, expect := range expectedPods {
var found bool
for _, got := range podsToSync {
if expect.UID == got.UID {
found = true
break
}
}
if !found {
t.Errorf("expected pod not found: %+v", expect)
}
}
} else {
t.Errorf("expected %d pods to sync, got %d", len(expectedPods), len(podsToSync))
}
}
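// TestGenerateAPIPodStatusWithSortedContainers verifies that generateAPIPodStatus returns
// container statuses sorted by container name, regardless of the order of the runtime statuses.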
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
numContainers := 10
expectedOrder := []string{}
cStatuses := []*kubecontainer.ContainerStatus{}
specContainerList := []api.Container{}
for i := 0; i < numContainers; i++ {
id := fmt.Sprintf("%v", i)
containerName := fmt.Sprintf("%vcontainer", id)
expectedOrder = append(expectedOrder, containerName)
cStatus := &kubecontainer.ContainerStatus{
ID: kubecontainer.BuildContainerID("test", id),
Name: containerName,
}
// Rearrange container statuses
if i%2 == 0 {
cStatuses = append(cStatuses, cStatus)
} else {
cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
}
specContainerList = append(specContainerList, api.Container{Name: containerName})
}
pod := podWithUidNameNs("uid1", "foo", "test")
pod.Spec = api.PodSpec{
Containers: specContainerList,
}
status := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
ContainerStatuses: cStatuses,
}
for i := 0; i < 5; i++ {
apiStatus := kubelet.generateAPIPodStatus(pod, status)
for i, c := range apiStatus.ContainerStatuses {
if expectedOrder[i] != c.Name {
t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
}
}
}
}
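// verifyContainerStatuses checks that every container status has the expected State and
// LastTerminationState.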
func verifyContainerStatuses(statuses []api.ContainerStatus, state, lastTerminationState map[string]api.ContainerState) error {
for _, s := range statuses {
if !reflect.DeepEqual(s.State, state[s.Name]) {
return fmt.Errorf("unexpected state: %s", diff.ObjectDiff(state[s.Name], s.State))
}
if !reflect.DeepEqual(s.LastTerminationState, lastTerminationState[s.Name]) {
return fmt.Errorf("unexpected last termination state %s", diff.ObjectDiff(
lastTerminationState[s.Name], s.LastTerminationState))
}
}
return nil
}
// Test generateAPIPodStatus with different reason cache and old api pod status.
func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
// The following waiting reason and message are generated in convertStatusToAPIStatus()
startWaitingReason := "ContainerCreating"
initWaitingReason := "PodInitializing"
testTimestamp := time.Unix(123456789, 987654321)
testErrorReason := fmt.Errorf("test-error")
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
pod := podWithUidNameNs("12345678", "foo", "new")
pod.Spec = api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure}
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
}
tests := []struct {
containers []api.Container
statuses []*kubecontainer.ContainerStatus
reasons map[string]error
oldStatuses []api.ContainerStatus
expectedState map[string]api.ContainerState
// Only set expectedInitState when it is different from expectedState
expectedInitState map[string]api.ContainerState
expectedLastTerminationState map[string]api.ContainerState
}{
		// For a container with no historical record, State should be Waiting and LastTerminationState should be
		// retrieved from the old status from the apiserver.
{
containers: []api.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
statuses: []*kubecontainer.ContainerStatus{},
reasons: map[string]error{},
oldStatuses: []api.ContainerStatus{{
Name: "with-old-record",
LastTerminationState: api.ContainerState{Terminated: &api.ContainerStateTerminated{}},
}},
expectedState: map[string]api.ContainerState{
"without-old-record": {Waiting: &api.ContainerStateWaiting{
Reason: startWaitingReason,
}},
"with-old-record": {Waiting: &api.ContainerStateWaiting{
Reason: startWaitingReason,
}},
},
expectedInitState: map[string]api.ContainerState{
"without-old-record": {Waiting: &api.ContainerStateWaiting{
Reason: initWaitingReason,
}},
"with-old-record": {Waiting: &api.ContainerStateWaiting{
Reason: initWaitingReason,
}},
},
expectedLastTerminationState: map[string]api.ContainerState{
"with-old-record": {Terminated: &api.ContainerStateTerminated{}},
},
},
		// For a running container, State should be Running and LastTerminationState should be retrieved from the latest terminated status.
{
containers: []api.Container{{Name: "running"}},
statuses: []*kubecontainer.ContainerStatus{
{
Name: "running",
State: kubecontainer.ContainerStateRunning,
StartedAt: testTimestamp,
},
{
Name: "running",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
},
reasons: map[string]error{},
oldStatuses: []api.ContainerStatus{},
expectedState: map[string]api.ContainerState{
"running": {Running: &api.ContainerStateRunning{
StartedAt: unversioned.NewTime(testTimestamp),
}},
},
expectedLastTerminationState: map[string]api.ContainerState{
"running": {Terminated: &api.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
// For terminated container:
// * If there is no recent start error record, State should be Terminated, LastTerminationState should be retrieved from
// second latest terminated status;
// * If there is recent start error record, State should be Waiting, LastTerminationState should be retrieved from latest
// terminated status;
		// * If ExitCode = 0 and the restart policy is RestartPolicyOnFailure, the container shouldn't be restarted.
		//   Whether or not there is a recent start error, State should be Terminated and LastTerminationState should
		//   be retrieved from the second latest terminated status.
{
containers: []api.Container{{Name: "without-reason"}, {Name: "with-reason"}},
statuses: []*kubecontainer.ContainerStatus{
{
Name: "without-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
{
Name: "with-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 2,
},
{
Name: "without-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 3,
},
{
Name: "with-reason",
State: kubecontainer.ContainerStateExited,
ExitCode: 4,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 0,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 5,
},
},
reasons: map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
oldStatuses: []api.ContainerStatus{},
expectedState: map[string]api.ContainerState{
"without-reason": {Terminated: &api.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
"with-reason": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
},
expectedLastTerminationState: map[string]api.ContainerState{
"without-reason": {Terminated: &api.ContainerStateTerminated{
ExitCode: 3,
ContainerID: emptyContainerID,
}},
"with-reason": {Terminated: &api.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 5,
ContainerID: emptyContainerID,
}},
},
},
}
for i, test := range tests {
kubelet.reasonCache = NewReasonCache()
for n, e := range test.reasons {
kubelet.reasonCache.add(pod.UID, n, e, "")
}
pod.Spec.Containers = test.containers
pod.Status.ContainerStatuses = test.oldStatuses
podStatus.ContainerStatuses = test.statuses
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
assert.NoError(t, verifyContainerStatuses(apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState), "case %d", i)
}
// Everything should be the same for init containers
for i, test := range tests {
kubelet.reasonCache = NewReasonCache()
for n, e := range test.reasons {
kubelet.reasonCache.add(pod.UID, n, e, "")
}
pod.Spec.InitContainers = test.containers
pod.Status.InitContainerStatuses = test.oldStatuses
podStatus.ContainerStatuses = test.statuses
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
expectedState := test.expectedState
if test.expectedInitState != nil {
expectedState = test.expectedInitState
}
assert.NoError(t, verifyContainerStatuses(apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState), "case %d", i)
}
}
// Test generateAPIPodStatus with different restart policies.
func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
testErrorReason := fmt.Errorf("test-error")
emptyContainerID := (&kubecontainer.ContainerID{}).String()
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
pod := podWithUidNameNs("12345678", "foo", "new")
containers := []api.Container{{Name: "succeed"}, {Name: "failed"}}
podStatus := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
ContainerStatuses: []*kubecontainer.ContainerStatus{
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 0,
},
{
Name: "failed",
State: kubecontainer.ContainerStateExited,
ExitCode: 1,
},
{
Name: "succeed",
State: kubecontainer.ContainerStateExited,
ExitCode: 2,
},
{
Name: "failed",
State: kubecontainer.ContainerStateExited,
ExitCode: 3,
},
},
}
kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
for c, test := range []struct {
restartPolicy api.RestartPolicy
expectedState map[string]api.ContainerState
expectedLastTerminationState map[string]api.ContainerState
// Only set expectedInitState when it is different from expectedState
expectedInitState map[string]api.ContainerState
// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
expectedInitLastTerminationState map[string]api.ContainerState
}{
{
restartPolicy: api.RestartPolicyNever,
expectedState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
expectedLastTerminationState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 3,
ContainerID: emptyContainerID,
}},
},
},
{
restartPolicy: api.RestartPolicyOnFailure,
expectedState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
{
restartPolicy: api.RestartPolicyAlways,
expectedState: map[string]api.ContainerState{
"succeed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedLastTerminationState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
// If the init container is terminated with exit code 0, it won't be restarted even when the
// restart policy is RestartAlways.
expectedInitState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
ContainerID: emptyContainerID,
}},
"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
},
expectedInitLastTerminationState: map[string]api.ContainerState{
"succeed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 2,
ContainerID: emptyContainerID,
}},
"failed": {Terminated: &api.ContainerStateTerminated{
ExitCode: 1,
ContainerID: emptyContainerID,
}},
},
},
} {
pod.Spec.RestartPolicy = test.restartPolicy
// Test normal containers
pod.Spec.Containers = containers
apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
assert.NoError(t, verifyContainerStatuses(apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState), "case %d", c)
pod.Spec.Containers = nil
// Test init containers
pod.Spec.InitContainers = containers
apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
if test.expectedInitState != nil {
expectedState = test.expectedInitState
}
if test.expectedInitLastTerminationState != nil {
expectedLastTerminationState = test.expectedInitLastTerminationState
}
assert.NoError(t, verifyContainerStatuses(apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState), "case %d", c)
pod.Spec.InitContainers = nil
}
}
// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
type testPodAdmitHandler struct {
// list of pods to reject.
podsToReject []*api.Pod
}
// Admit rejects all pods in the podsToReject list with a matching UID.
func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
for _, podToReject := range a.podsToReject {
if podToReject.UID == attrs.Pod.UID {
return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
}
}
return lifecycle.PodAdmitResult{Admit: true}
}
// Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
kl.nodeInfo = testNodeInfo{nodes: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
Status: api.NodeStatus{
Allocatable: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
},
},
},
}}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "podA",
Namespace: "foo",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "podB",
Namespace: "foo",
},
},
}
podToReject := pods[0]
podToAdmit := pods[1]
podsToReject := []*api.Pod{podToReject}
kl.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
// podToReject should be Failed
status, found := kl.statusManager.GetPodStatus(podToReject.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", podToReject.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
// podToAdmit should be Pending
status, found = kl.statusManager.GetPodStatus(podToAdmit.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", podToAdmit.UID)
}
if status.Phase != api.PodPending {
t.Fatalf("expected pod status %q. Got %q.", api.PodPending, status.Phase)
}
}
// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
// list of pods to sync
podsToSync []*api.Pod
}
// ShouldSync evaluates if the pod should be synced from the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *api.Pod) bool {
for _, podToSync := range a.podsToSync {
if podToSync.UID == pod.UID {
return true
}
}
return false
}
// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
podUIDs := []types.UID{}
for _, pod := range pods {
podUIDs = append(podUIDs, pod.UID)
}
podsToSync := []*api.Pod{pods[0]}
kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{podsToSync})
kubelet.podManager.SetPods(pods)
expectedPodsUID := []types.UID{pods[0].UID}
podsToSync = kubelet.getPodsToSync()
if len(podsToSync) == len(expectedPodsUID) {
var rightNum int
for _, podUID := range expectedPodsUID {
for _, podToSync := range podsToSync {
if podToSync.UID == podUID {
rightNum++
break
}
}
}
if rightNum != len(expectedPodsUID) {
			// Only used to report the error.
podsToSyncUID := []types.UID{}
for _, podToSync := range podsToSync {
podsToSyncUID = append(podsToSyncUID, podToSync.UID)
}
t.Errorf("expected pods %v to sync, got %v", expectedPodsUID, podsToSyncUID)
}
} else {
t.Errorf("expected %d pods to sync, got %d", 3, len(podsToSync))
}
}
// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
// list of pods to evict.
podsToEvict []*api.Pod
// the reason for the eviction
reason string
	// the message for the eviction
message string
}
// ShouldEvict evaluates if the pod should be evicted from the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse {
for _, podToEvict := range a.podsToEvict {
if podToEvict.UID == pod.UID {
return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
}
}
return lifecycle.ShouldEvictResponse{Evict: false}
}
// TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that the registered pod sync handlers are invoked and that the resulting phase, reason, and message are reported in the API pod status.
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pod := newTestPods(1)[0]
podsToEvict := []*api.Pod{pod}
kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
status := &kubecontainer.PodStatus{
ID: pod.UID,
Name: pod.Name,
Namespace: pod.Namespace,
}
apiStatus := kubelet.generateAPIPodStatus(pod, status)
if apiStatus.Phase != api.PodFailed {
t.Fatalf("Expected phase %v, but got %v", api.PodFailed, apiStatus.Phase)
}
if apiStatus.Reason != "Evicted" {
t.Fatalf("Expected reason %v, but got %v", "Evicted", apiStatus.Reason)
}
if apiStatus.Message != "because" {
t.Fatalf("Expected message %v, but got %v", "because", apiStatus.Message)
}
}
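// TestSyncPodKillPod verifies that a SyncPodKill update kills the pod and stores the status
// produced by the supplied PodStatusFunc.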
func TestSyncPodKillPod(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "foo",
},
}
pods := []*api.Pod{pod}
kl.podManager.SetPods(pods)
gracePeriodOverride := int64(0)
err := kl.syncPod(syncPodOptions{
pod: pod,
podStatus: &kubecontainer.PodStatus{},
updateType: kubetypes.SyncPodKill,
killPodOptions: &KillPodOptions{
PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus {
return api.PodStatus{
Phase: api.PodFailed,
Reason: "reason",
Message: "message",
}
},
PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
},
})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(pod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", pod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
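// waitForVolumeUnmount waits, with exponential backoff, until the volume manager reports no
// mounted volumes for the given pod.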
func waitForVolumeUnmount(
volumeManager kubeletvolume.VolumeManager,
pod *api.Pod) error {
var podVolumes kubecontainer.VolumeMap
err := retryWithExponentialBackOff(
time.Duration(50*time.Millisecond),
func() (bool, error) {
// Verify volumes detached
podVolumes = volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
if len(podVolumes) != 0 {
return false, nil
}
return true, nil
},
)
if err != nil {
return fmt.Errorf(
"Expected volumes to be unmounted. But some volumes are still mounted: %#v", podVolumes)
}
return nil
}
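// waitForVolumeDetach waits, with exponential backoff, until the volume manager reports no
// volumes in use.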
func waitForVolumeDetach(
volumeManager kubeletvolume.VolumeManager) error {
attachedVolumes := []api.UniqueVolumeName{}
err := retryWithExponentialBackOff(
time.Duration(50*time.Millisecond),
func() (bool, error) {
// Verify volumes detached
attachedVolumes = volumeManager.GetVolumesInUse()
if len(attachedVolumes) != 0 {
return false, nil
}
return true, nil
},
)
if err != nil {
return fmt.Errorf(
"Expected volumes to be detached. But some volumes are still attached: %#v", attachedVolumes)
}
return nil
}
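// retryWithExponentialBackOff retries fn with exponential backoff (factor 3, up to 6 steps)
// starting at initialDuration.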
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
backoff := wait.Backoff{
Duration: initialDuration,
Factor: 3,
Jitter: 0,
Steps: 6,
}
return wait.ExponentialBackoff(backoff, fn)
}