/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"sort"
	"testing"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/capabilities"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/testing/core"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/config"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/eviction"
	"k8s.io/kubernetes/pkg/kubelet/images"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	"k8s.io/kubernetes/pkg/kubelet/network"
	nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
	"k8s.io/kubernetes/pkg/kubelet/pleg"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
	probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
	"k8s.io/kubernetes/pkg/kubelet/server/stats"
	"k8s.io/kubernetes/pkg/kubelet/status"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/clock"
	"k8s.io/kubernetes/pkg/util/flowcontrol"
	"k8s.io/kubernetes/pkg/util/mount"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/term"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/volume"
	_ "k8s.io/kubernetes/pkg/volume/host_path"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

func init() {
	utilruntime.ReallyCrash = true
}

const (
	testKubeletHostname = "127.0.0.1"

	testReservationCPU    = "200m"
	testReservationMemory = "100M"

	maxImageTagsForTest = 3

	// TODO(harry) any global place for these two?
	// Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
	minImgSize int64 = 23 * 1024 * 1024
	maxImgSize int64 = 1000 * 1024 * 1024
)

|
2015-12-12 01:51:39 +00:00
|
|
|
|
2015-06-05 22:44:16 +00:00
|
|
|
type TestKubelet struct {
|
2015-05-26 23:45:38 +00:00
|
|
|
kubelet *Kubelet
|
2016-02-25 23:40:44 +00:00
|
|
|
fakeRuntime *containertest.FakeRuntime
|
|
|
|
fakeCadvisor *cadvisortest.Mock
|
2016-02-01 22:30:47 +00:00
|
|
|
fakeKubeClient *fake.Clientset
|
2016-02-25 23:40:44 +00:00
|
|
|
fakeMirrorClient *podtest.FakeMirrorClient
|
2016-05-26 03:08:56 +00:00
|
|
|
fakeClock *clock.FakeClock
|
2016-02-18 20:38:02 +00:00
|
|
|
mounter mount.Interface
|
2016-05-30 02:22:22 +00:00
|
|
|
volumePlugin *volumetest.FakeVolumePlugin
|
2015-05-26 23:45:38 +00:00
|
|
|
}
// newTestKubelet returns a test kubelet with two images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
	imageList := []kubecontainer.Image{
		{
			ID:       "abc",
			RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
			Size:     123,
		},
		{
			ID:       "efg",
			RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
			Size:     456,
		},
	}
	return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled)
}

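// newTestKubeletWithImageList wires a Kubelet to fakes for every external
// dependency (container runtime, cadvisor, API client, volume plugins,
// clock) so tests can drive pod-sync and volume logic without a real node.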
func newTestKubeletWithImageList(
	t *testing.T,
	imageList []kubecontainer.Image,
	controllerAttachDetachEnabled bool) *TestKubelet {
	fakeRuntime := &containertest.FakeRuntime{}
	fakeRuntime.RuntimeType = "test"
	fakeRuntime.VersionInfo = "1.5.0"
	fakeRuntime.ImageList = imageList
	fakeRecorder := &record.FakeRecorder{}
	fakeKubeClient := &fake.Clientset{}
	kubelet := &Kubelet{}
	kubelet.recorder = fakeRecorder
	kubelet.kubeClient = fakeKubeClient
	kubelet.os = &containertest.FakeOS{}

	kubelet.hostname = testKubeletHostname
	kubelet.nodeName = testKubeletHostname
	kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
	kubelet.runtimeState.setNetworkState(nil)
	kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kubelet.nonMasqueradeCIDR, 1440)
	if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	} else {
		kubelet.rootDirectory = tempDir
	}
	if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
		t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
	}
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
	kubelet.masterServiceNamespace = api.NamespaceDefault
	kubelet.serviceLister = testServiceLister{}
	kubelet.nodeLister = testNodeLister{}
	kubelet.nodeInfo = testNodeInfo{}
	kubelet.recorder = fakeRecorder
	if err := kubelet.setupDataDirs(); err != nil {
		t.Fatalf("can't initialize kubelet data dirs: %v", err)
	}
	kubelet.daemonEndpoints = &api.NodeDaemonEndpoints{}

	mockCadvisor := &cadvisortest.Mock{}
	kubelet.cadvisor = mockCadvisor

	fakeMirrorClient := podtest.NewFakeMirrorClient()
	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient)
	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
	kubelet.containerRefManager = kubecontainer.NewRefManager()
	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
	if err != nil {
		t.Fatalf("can't initialize disk space manager: %v", err)
	}
	kubelet.diskSpaceManager = diskSpaceManager

	kubelet.containerRuntime = fakeRuntime
	kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
	kubelet.reasonCache = NewReasonCache()
	kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
	kubelet.podWorkers = &fakePodWorkers{
		syncPodFn: kubelet.syncPod,
		cache:     kubelet.podCache,
		t:         t,
	}

	kubelet.probeManager = probetest.FakeManager{}
	kubelet.livenessManager = proberesults.NewManager()

	kubelet.containerManager = cm.NewStubContainerManager()
	fakeNodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      testKubeletHostname,
		UID:       types.UID(testKubeletHostname),
		Namespace: "",
	}
	fakeImageGCPolicy := images.ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	kubelet.imageManager, err = images.NewImageGCManager(fakeRuntime, mockCadvisor, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
	fakeClock := clock.NewFakeClock(time.Now())
	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
	kubelet.backOff.Clock = fakeClock
	kubelet.podKillingCh = make(chan *kubecontainer.PodPair, 20)
	kubelet.resyncInterval = 10 * time.Second
	kubelet.reservation = kubetypes.Reservation{
		Kubernetes: api.ResourceList{
			api.ResourceCPU:    resource.MustParse(testReservationCPU),
			api.ResourceMemory: resource.MustParse(testReservationMemory),
		},
	}
	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
	// Relist period does not affect the tests.
	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, 100, time.Hour, nil, clock.RealClock{})
	kubelet.clock = fakeClock
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
	volumeStatsAggPeriod := time.Second * 10
	kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
	nodeRef := &api.ObjectReference{
		Kind:      "Node",
		Name:      kubelet.nodeName,
		UID:       types.UID(kubelet.nodeName),
		Namespace: "",
	}
	// setup eviction manager
	evictionManager, evictionAdmitHandler, err := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers), kubelet.imageManager, fakeRecorder, nodeRef, kubelet.clock)
	require.NoError(t, err, "Failed to initialize eviction manager")

	kubelet.evictionManager = evictionManager
	kubelet.AddPodAdmitHandler(evictionAdmitHandler)

	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	kubelet.volumePluginMgr, err =
		NewInitializedVolumePluginMgr(kubelet, []volume.VolumePlugin{plug})
	require.NoError(t, err, "Failed to initialize VolumePluginMgr")

	kubelet.mounter = &mount.FakeMounter{}
	kubelet.volumeManager, err = kubeletvolume.NewVolumeManager(
		controllerAttachDetachEnabled,
		kubelet.hostname,
		kubelet.podManager,
		fakeKubeClient,
		kubelet.volumePluginMgr,
		fakeRuntime,
		kubelet.mounter,
		kubelet.getPodsDir(),
		kubelet.recorder)
	require.NoError(t, err, "Failed to initialize volume manager")

	// enable active deadline handler
	activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
	require.NoError(t, err, "Can't initialize active deadline handler")

	kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
	kubelet.AddPodSyncHandler(activeDeadlineHandler)

	return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}

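// newTestPods returns count host-network pods with distinct UIDs and names
// "pod0", "pod1", and so on.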
func newTestPods(count int) []*api.Pod {
	pods := make([]*api.Pod, count)
	for i := 0; i < count; i++ {
		pods[i] = &api.Pod{
			Spec: api.PodSpec{
				SecurityContext: &api.PodSecurityContext{
					HostNetwork: true,
				},
			},
			ObjectMeta: api.ObjectMeta{
				UID:  types.UID(10000 + i),
				Name: fmt.Sprintf("pod%d", i),
			},
		}
	}
	return pods
}

var emptyPodUIDs map[types.UID]kubetypes.SyncPodType

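// TestSyncLoopTimeUpdate verifies that each pass through syncLoopIteration
// advances the timestamp reported by LatestLoopEntryTime.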
func TestSyncLoopTimeUpdate(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	kubelet := testKubelet.kubelet

	loopTime1 := kubelet.LatestLoopEntryTime()
	require.True(t, loopTime1.IsZero(), "Expect sync loop time to be zero")

	// Start sync ticker.
	syncCh := make(chan time.Time, 1)
	housekeepingCh := make(chan time.Time, 1)
	plegCh := make(chan *pleg.PodLifecycleEvent)
	syncCh <- time.Now()
	kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
	loopTime2 := kubelet.LatestLoopEntryTime()
	require.False(t, loopTime2.IsZero(), "Expect sync loop time to be non-zero")

	syncCh <- time.Now()
	kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
	loopTime3 := kubelet.LatestLoopEntryTime()
	require.True(t, loopTime3.After(loopTime1),
		"Sync Loop Time was not updated correctly. Second update timestamp %v should be greater than first update timestamp %v",
		loopTime3, loopTime1)
}

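// TestSyncLoopAbort verifies that syncLoopIteration reports failure once the
// update channel is closed, and that syncLoop itself terminates instead of
// spinning on the closed channel.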
func TestSyncLoopAbort(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	kubelet := testKubelet.kubelet
	kubelet.runtimeState.setRuntimeSync(time.Now())
	// The syncLoop waits on time.After(resyncInterval), set it really big so that we don't race for
	// the channel close
	kubelet.resyncInterval = time.Second * 30

	ch := make(chan kubetypes.PodUpdate)
	close(ch)

	// sanity check (also prevent this test from hanging in the next step)
	ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
	require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")

	// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
	kubelet.syncLoop(ch, kubelet)
}

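// TestSyncPodsStartPod verifies that syncing a newly added pod starts it in
// the fake runtime.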
func TestSyncPodsStartPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	pods := []*api.Pod{
		podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
			Containers: []api.Container{
				{Name: "bar"},
			},
		}),
	}
	kubelet.podManager.SetPods(pods)
	kubelet.HandlePodSyncs(pods)
	fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}

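// TestSyncPodsDeletesWhenSourcesAreReady verifies that pod cleanup kills a
// running pod with no matching config only after all pod sources report
// ready.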
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
	ready := false

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	fakeRuntime := testKubelet.fakeRuntime
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "foo",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "bar"},
			},
		}},
	}
	kubelet.HandlePodCleanups()
	// Sources are not ready yet. Don't remove any pods.
	fakeRuntime.AssertKilledPods([]string{})

	ready = true
	kubelet.HandlePodCleanups()

	// Sources are ready. Remove unwanted pods.
	fakeRuntime.AssertKilledPods([]string{"12345678"})
}

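// TestVolumeAttachAndMountControllerDisabled exercises the kubelet-driven
// attach path: with the attach/detach controller disabled, the volume
// manager itself must attach, wait for, mount, and set up the pod's volume.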
func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer func() {
		close(stopCh)
	}()

	kubelet.podManager.SetPods([]*api.Pod{pod})
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	assert.NoError(t, err)

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
}

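// TestVolumeUnmountAndDetachControllerDisabled covers the full kubelet-driven
// volume lifecycle: attach and mount when the pod is added, then unmount and
// detach after the pod is removed.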
func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer func() {
		close(stopCh)
	}()

	// Add pod
	kubelet.podManager.SetPods([]*api.Pod{pod})

	// Verify volumes attached
	err := kubelet.volumeManager.WaitForAttachAndMount(pod)
	assert.NoError(t, err)

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}

	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))

	// Remove pod
	kubelet.podManager.SetPods([]*api.Pod{})

	assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod))

	// Verify volumes unmounted
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	assert.Len(t, podVolumes, 0,
		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)

	assert.NoError(t, volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))

	// Verify volumes detached and no longer reported as in use
	assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager))
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyDetachCallCount(
		1 /* expectedDetachCallCount */, testKubelet.volumePlugin))
}

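// TestVolumeAttachAndMountControllerEnabled exercises the controller-driven
// path: the node reactor reports the volume as already attached, so the
// kubelet must mount it without issuing any Attach calls of its own.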
func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &api.Node{
				ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
				Status: api.NodeStatus{
					VolumesAttached: []api.AttachedVolume{
						{
							Name:       "fake/vol1",
							DevicePath: "fake/path",
						},
					}},
				Spec: api.NodeSpec{ExternalID: testKubeletHostname},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer func() {
		close(stopCh)
	}()

	kubelet.podManager.SetPods([]*api.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		api.UniqueVolumeName("fake/vol1"),
		stopCh,
		kubelet.volumeManager)

	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
}

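// TestVolumeUnmountAndDetachControllerEnabled mirrors the controller-enabled
// attach test through pod removal: volumes are unmounted and reported
// detached without the kubelet calling Attach or Detach itself.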
func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &api.Node{
				ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
				Status: api.NodeStatus{
					VolumesAttached: []api.AttachedVolume{
						{
							Name:       "fake/vol1",
							DevicePath: "fake/path",
						},
					}},
				Spec: api.NodeSpec{ExternalID: testKubeletHostname},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
		Volumes: []api.Volume{
			{
				Name: "vol1",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer func() {
		close(stopCh)
	}()

	// Add pod
	kubelet.podManager.SetPods([]*api.Pod{pod})

	// Fake node status update
	go simulateVolumeInUseUpdate(
		api.UniqueVolumeName("fake/vol1"),
		stopCh,
		kubelet.volumeManager)

	// Verify volumes attached
	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}

	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))

	// Remove pod
	kubelet.podManager.SetPods([]*api.Pod{})

	assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod))

	// Verify volumes unmounted
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		volumehelper.GetUniquePodName(pod))

	assert.Len(t, podVolumes, 0,
		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)

	assert.NoError(t, volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))

	// Verify volumes detached and no longer reported as in use
	assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager))
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin))
}

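// stubVolume is a minimal stand-in used as a volume Mounter in tests: its
// setup methods are no-ops and GetPath returns a fixed path.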
type stubVolume struct {
	path string
	volume.MetricsNil
}

func (f *stubVolume) GetPath() string {
	return f.path
}

func (f *stubVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{}
}

func (f *stubVolume) SetUp(fsGroup *int64) error {
	return nil
}

func (f *stubVolume) SetUpAt(dir string, fsGroup *int64) error {
	return nil
}

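// TestMakeVolumeMounts verifies that makeMounts translates a container's
// VolumeMounts into runtime mounts, resolving each volume name to its host
// path via podVolumes and preserving the ReadOnly flag.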
func TestMakeVolumeMounts(t *testing.T) {
	container := api.Container{
		VolumeMounts: []api.VolumeMount{
			{
				MountPath: "/etc/hosts",
				Name:      "disk",
				ReadOnly:  false,
			},
			{
				MountPath: "/mnt/path3",
				Name:      "disk",
				ReadOnly:  true,
			},
			{
				MountPath: "/mnt/path4",
				Name:      "disk4",
				ReadOnly:  false,
			},
			{
				MountPath: "/mnt/path5",
				Name:      "disk5",
				ReadOnly:  false,
			},
		},
	}

	podVolumes := kubecontainer.VolumeMap{
		"disk":  kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}},
		"disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}},
		"disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}},
	}

	pod := api.Pod{
		Spec: api.PodSpec{
			SecurityContext: &api.PodSecurityContext{
				HostNetwork: true,
			},
		},
	}

	mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes)

	expectedMounts := []kubecontainer.Mount{
		{
			Name:           "disk",
			ContainerPath:  "/etc/hosts",
			HostPath:       "/mnt/disk",
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk",
			ContainerPath:  "/mnt/path3",
			HostPath:       "/mnt/disk",
			ReadOnly:       true,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk4",
			ContainerPath:  "/mnt/path4",
			HostPath:       "/mnt/host",
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk5",
			ContainerPath:  "/mnt/path5",
			HostPath:       "/var/lib/kubelet/podID/volumes/empty/disk5",
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
	}
	assert.Equal(t, expectedMounts, mounts, "mounts of container %+v", container)
}

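// fakeContainerCommandRunner records the arguments of ExecInContainer and
// PortForward calls and plays back canned stdout/stderr data and an error.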
type fakeContainerCommandRunner struct {
	// what was passed in
	Cmd    []string
	ID     kubecontainer.ContainerID
	PodID  types.UID
	E      error
	Stdin  io.Reader
	Stdout io.WriteCloser
	Stderr io.WriteCloser
	TTY    bool
	Port   uint16
	Stream io.ReadWriteCloser

	// what to return
	StdoutData string
	StderrData string
}

func (f *fakeContainerCommandRunner) ExecInContainer(id kubecontainer.ContainerID, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan term.Size) error {
	// record params
	f.Cmd = cmd
	f.ID = id
	f.Stdin = in
	f.Stdout = out
	f.Stderr = err
	f.TTY = tty

	// Copy stdout/stderr data
	fmt.Fprint(out, f.StdoutData)
	fmt.Fprint(out, f.StderrData)

	return f.E
}

func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
	f.PodID = pod.ID
	f.Port = port
	f.Stream = stream
	return nil
}

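// TestRunInContainerNoSuchPod verifies that running a command in a pod the
// runtime does not know about fails and produces no output.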
func TestRunInContainerNoSuchPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	fakeRuntime.PodList = []*containertest.FakePod{}

	podName := "podFoo"
	podNamespace := "nsFoo"
	containerName := "containerFoo"
	output, err := kubelet.RunInContainer(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
		"",
		containerName,
		[]string{"ls"})
	assert.Error(t, err)
	assert.Nil(t, output, "output should be nil")
}

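// TestRunInContainer verifies that RunInContainer resolves the target
// container, forwards the command to the runner, and returns the combined
// stdout/stderr data, in both the success and error cases.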
func TestRunInContainer(t *testing.T) {
	for _, testError := range []error{nil, errors.New("foo")} {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
		kubelet := testKubelet.kubelet
		fakeRuntime := testKubelet.fakeRuntime
		fakeCommandRunner := fakeContainerCommandRunner{
			E:          testError,
			StdoutData: "foo",
			StderrData: "bar",
		}
		kubelet.runner = &fakeCommandRunner

		containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
		fakeRuntime.PodList = []*containertest.FakePod{
			{Pod: &kubecontainer.Pod{
				ID:        "12345678",
				Name:      "podFoo",
				Namespace: "nsFoo",
				Containers: []*kubecontainer.Container{
					{Name: "containerFoo",
						ID: containerID,
					},
				},
			}},
		}
		cmd := []string{"ls"}
		actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
		assert.Equal(t, containerID, fakeCommandRunner.ID, "(testError=%v) ID", testError)
		assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
		// this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test
		assert.Equal(t, "foobar", string(actualOutput), "(testError=%v) output", testError)
		assert.Equal(t, fmt.Sprintf("%s", err), fmt.Sprintf("%s", testError), "(testError=%v) err", testError)
	}
}

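// TestDNSConfigurationParams checks the nameservers and search domains
// generated for DNSClusterFirst versus DNSDefault pods, both with and
// without a host resolver config.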
func TestDNSConfigurationParams(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet

	clusterNS := "203.0.113.1"
	kubelet.clusterDomain = "kubernetes.io"
	kubelet.clusterDNS = net.ParseIP(clusterNS)

	pods := newTestPods(2)
	pods[0].Spec.DNSPolicy = api.DNSClusterFirst
	pods[1].Spec.DNSPolicy = api.DNSDefault

	options := make([]*kubecontainer.RunContainerOptions, 2)
	for i, pod := range pods {
		var err error
		options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "")
		if err != nil {
			t.Fatalf("failed to generate container options: %v", err)
		}
	}
	if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
		t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
	}
	if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
		t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
	}
	if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
		t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
	}
	if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
		t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
	}

	kubelet.resolverConfig = "/etc/resolv.conf"
	for i, pod := range pods {
		var err error
		options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{}, "")
		if err != nil {
			t.Fatalf("failed to generate container options: %v", err)
		}
	}
	t.Logf("nameservers %+v", options[1].DNS)
	if len(options[0].DNS) != 1 {
		t.Errorf("expected cluster nameserver only, got %+v", options[0].DNS)
	} else if options[0].DNS[0] != clusterNS {
		t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
	}
	if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 {
		t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
	} else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
		t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
	}
}

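// testServiceLister, testNodeLister, and testNodeInfo serve fixed lists of
// services and nodes to the kubelet; envs makes kubecontainer.EnvVar slices
// sortable by name.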
type testServiceLister struct {
	services []api.Service
}

func (ls testServiceLister) List() (api.ServiceList, error) {
	return api.ServiceList{
		Items: ls.services,
	}, nil
}

type testNodeLister struct {
	nodes []api.Node
}

type testNodeInfo struct {
	nodes []api.Node
}

func (ls testNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
	for _, node := range ls.nodes {
		if node.Name == id {
			return &node, nil
		}
	}
	return nil, fmt.Errorf("Node with name: %s does not exist", id)
}

func (ls testNodeLister) List() (api.NodeList, error) {
	return api.NodeList{
		Items: ls.nodes,
	}, nil
}

type envs []kubecontainer.EnvVar

func (e envs) Len() int {
	return len(e)
}

func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }

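// buildService constructs a minimal api.Service with a single port for the
// environment variable tests below.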
func buildService(name, namespace, clusterIP, protocol string, port int) api.Service {
	return api.Service{
		ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Protocol: api.Protocol(protocol),
				Port:     int32(port),
			}},
			ClusterIP: clusterIP,
		},
	}
}

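// TestMakeEnvironmentVariables verifies the service-link environment
// variables (FOO_SERVICE_HOST, FOO_PORT_..., and friends) injected into
// containers, plus downward-API values and env var expansion, across
// combinations of pod namespace and master service namespace.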
func TestMakeEnvironmentVariables(t *testing.T) {
|
|
|
|
services := []api.Service{
|
2016-04-24 22:47:42 +00:00
|
|
|
buildService("kubernetes", api.NamespaceDefault, "1.2.3.1", "TCP", 8081),
|
|
|
|
buildService("test", "test1", "1.2.3.3", "TCP", 8083),
|
|
|
|
buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
|
|
|
|
buildService("test", "test2", "1.2.3.5", "TCP", 8085),
|
|
|
|
buildService("test", "test2", "None", "TCP", 8085),
|
|
|
|
buildService("test", "test2", "", "TCP", 8085),
|
|
|
|
buildService("kubernetes", "kubernetes", "1.2.3.6", "TCP", 8086),
|
|
|
|
buildService("not-special", "kubernetes", "1.2.3.8", "TCP", 8088),
|
|
|
|
buildService("not-special", "kubernetes", "None", "TCP", 8088),
|
|
|
|
buildService("not-special", "kubernetes", "", "TCP", 8088),
|
2015-01-08 15:25:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
testCases := []struct {
|
2015-05-12 21:49:35 +00:00
|
|
|
name string // the name of the test case
|
|
|
|
ns string // the namespace to generate environment for
|
|
|
|
container *api.Container // the container to use
|
|
|
|
masterServiceNs string // the namespace to read master service info from
|
|
|
|
nilLister bool // whether the lister should be nil
|
|
|
|
expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars
|
2015-01-08 15:25:14 +00:00
|
|
|
}{
|
|
|
|
{
|
2015-05-12 21:14:32 +00:00
|
|
|
name: "api server = Y, kubelet = Y",
|
|
|
|
ns: "test1",
|
|
|
|
container: &api.Container{
|
2015-01-08 15:25:14 +00:00
|
|
|
Env: []api.EnvVar{
|
|
|
|
{Name: "FOO", Value: "BAR"},
|
|
|
|
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
|
|
|
|
{Name: "TEST_SERVICE_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
|
|
|
},
|
|
|
|
},
|
2015-05-12 21:14:32 +00:00
|
|
|
masterServiceNs: api.NamespaceDefault,
|
|
|
|
nilLister: false,
|
2015-05-12 21:49:35 +00:00
|
|
|
expectedEnvs: []kubecontainer.EnvVar{
|
|
|
|
{Name: "FOO", Value: "BAR"},
|
|
|
|
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
|
|
|
|
{Name: "TEST_SERVICE_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
|
|
|
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
|
|
|
|
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
|
|
|
|
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
|
|
|
|
},
|
2015-01-08 15:25:14 +00:00
|
|
|
},
|
|
|
|
{
|
2015-05-12 21:14:32 +00:00
|
|
|
name: "api server = Y, kubelet = N",
|
|
|
|
ns: "test1",
|
|
|
|
container: &api.Container{
|
2015-01-08 15:25:14 +00:00
|
|
|
Env: []api.EnvVar{
|
|
|
|
{Name: "FOO", Value: "BAR"},
|
|
|
|
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
|
|
|
|
{Name: "TEST_SERVICE_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
|
|
|
},
|
|
|
|
},
|
2015-05-12 21:14:32 +00:00
|
|
|
masterServiceNs: api.NamespaceDefault,
|
|
|
|
nilLister: true,
|
2015-05-12 21:49:35 +00:00
|
|
|
expectedEnvs: []kubecontainer.EnvVar{
|
|
|
|
{Name: "FOO", Value: "BAR"},
|
|
|
|
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
|
|
|
|
{Name: "TEST_SERVICE_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
|
|
|
},
|
2015-01-08 15:25:14 +00:00
|
|
|
},
|
|
|
|
{
|
2015-05-12 21:14:32 +00:00
|
|
|
name: "api server = N; kubelet = Y",
|
|
|
|
ns: "test1",
|
|
|
|
container: &api.Container{
|
2015-01-08 15:25:14 +00:00
|
|
|
Env: []api.EnvVar{
|
|
|
|
{Name: "FOO", Value: "BAZ"},
|
|
|
|
},
|
|
|
|
},
|
2015-05-12 21:14:32 +00:00
|
|
|
masterServiceNs: api.NamespaceDefault,
|
|
|
|
nilLister: false,
|
2015-05-12 21:49:35 +00:00
|
|
|
expectedEnvs: []kubecontainer.EnvVar{
|
|
|
|
{Name: "FOO", Value: "BAZ"},
|
|
|
|
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
|
|
|
|
{Name: "TEST_SERVICE_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
|
|
|
|
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
|
|
|
|
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
|
|
|
|
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
|
|
|
|
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
|
|
|
|
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
|
|
|
|
},
|
2015-01-08 15:25:14 +00:00
|
|
|
},
|
|
|
|
{
|
2015-05-12 21:14:32 +00:00
|
|
|
name: "master service in pod ns",
|
|
|
|
ns: "test2",
|
|
|
|
container: &api.Container{
|
2015-01-08 15:25:14 +00:00
|
|
|
Env: []api.EnvVar{
|
|
|
|
{Name: "FOO", Value: "ZAP"},
|
|
|
|
},
|
|
|
|
},
|
2015-05-12 21:14:32 +00:00
|
|
|
masterServiceNs: "kubernetes",
|
|
|
|
nilLister: false,
|
2015-05-12 21:49:35 +00:00
|
|
|
expectedEnvs: []kubecontainer.EnvVar{
|
|
|
|
{Name: "FOO", Value: "ZAP"},
|
|
|
|
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
|
|
|
|
{Name: "TEST_SERVICE_PORT", Value: "8085"},
|
|
|
|
{Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
|
|
|
|
{Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
|
|
|
|
{Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
|
|
|
|
{Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
|
|
|
|
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
|
|
|
|
{Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
|
|
|
|
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
|
|
|
|
{Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
|
|
|
|
{Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
|
|
|
|
{Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
|
|
|
|
{Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
|
|
|
|
},
|
2015-01-08 15:25:14 +00:00
|
|
|
},
|
|
|
|
		{
			name:            "pod in master service ns",
			ns:              "kubernetes",
			container:       &api.Container{},
			masterServiceNs: "kubernetes",
			nilLister:       false,
			expectedEnvs: []kubecontainer.EnvVar{
				{Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
				{Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
				{Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
				{Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
				{Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
				{Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
				{Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"},
				{Name: "KUBERNETES_SERVICE_PORT", Value: "8086"},
				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"},
				{Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"},
				{Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"},
				{Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"},
				{Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"},
			},
		},
		{
			name: "downward api pod",
			ns:   "downward-api",
			container: &api.Container{
				Env: []api.EnvVar{
					{
						Name: "POD_NAME",
						ValueFrom: &api.EnvVarSource{
							FieldRef: &api.ObjectFieldSelector{
								APIVersion: testapi.Default.GroupVersion().String(),
								FieldPath:  "metadata.name",
							},
						},
					},
					{
						Name: "POD_NAMESPACE",
						ValueFrom: &api.EnvVarSource{
							FieldRef: &api.ObjectFieldSelector{
								APIVersion: testapi.Default.GroupVersion().String(),
								FieldPath:  "metadata.namespace",
							},
						},
					},
					{
						Name: "POD_NODE_NAME",
						ValueFrom: &api.EnvVarSource{
							FieldRef: &api.ObjectFieldSelector{
								APIVersion: testapi.Default.GroupVersion().String(),
								FieldPath:  "spec.nodeName",
							},
						},
					},
					{
						Name: "POD_SERVICE_ACCOUNT_NAME",
						ValueFrom: &api.EnvVarSource{
							FieldRef: &api.ObjectFieldSelector{
								APIVersion: testapi.Default.GroupVersion().String(),
								FieldPath:  "spec.serviceAccountName",
							},
						},
					},
					{
						Name: "POD_IP",
						ValueFrom: &api.EnvVarSource{
							FieldRef: &api.ObjectFieldSelector{
								APIVersion: testapi.Default.GroupVersion().String(),
								FieldPath:  "status.podIP",
							},
						},
					},
				},
			},
			masterServiceNs: "nothing",
			nilLister:       true,
			expectedEnvs: []kubecontainer.EnvVar{
				{Name: "POD_NAME", Value: "dapi-test-pod-name"},
				{Name: "POD_NAMESPACE", Value: "downward-api"},
				{Name: "POD_NODE_NAME", Value: "node-name"},
				{Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
				{Name: "POD_IP", Value: "1.2.3.4"},
			},
		},
		{
			name: "env expansion",
			ns:   "test1",
			container: &api.Container{
				Env: []api.EnvVar{
					{
						Name:  "TEST_LITERAL",
						Value: "test-test-test",
					},
					{
						Name: "POD_NAME",
						ValueFrom: &api.EnvVarSource{
							FieldRef: &api.ObjectFieldSelector{
								APIVersion: testapi.Default.GroupVersion().String(),
								FieldPath:  "metadata.name",
							},
						},
					},
					{
						Name:  "OUT_OF_ORDER_TEST",
						Value: "$(OUT_OF_ORDER_TARGET)",
					},
					{
						Name:  "OUT_OF_ORDER_TARGET",
						Value: "FOO",
					},
					{
						Name: "EMPTY_VAR",
					},
					{
						Name:  "EMPTY_TEST",
						Value: "foo-$(EMPTY_VAR)",
					},
					{
						Name:  "POD_NAME_TEST2",
						Value: "test2-$(POD_NAME)",
					},
					{
						Name:  "POD_NAME_TEST3",
						Value: "$(POD_NAME_TEST2)-3",
					},
					{
						Name:  "LITERAL_TEST",
						Value: "literal-$(TEST_LITERAL)",
					},
					{
						Name:  "SERVICE_VAR_TEST",
						Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
					},
					{
						Name:  "TEST_UNDEFINED",
						Value: "$(UNDEFINED_VAR)",
					},
				},
			},
			masterServiceNs: "nothing",
			nilLister:       false,
			expectedEnvs: []kubecontainer.EnvVar{
				{
					Name:  "TEST_LITERAL",
					Value: "test-test-test",
				},
				{
					Name:  "POD_NAME",
					Value: "dapi-test-pod-name",
				},
				{
					Name:  "POD_NAME_TEST2",
					Value: "test2-dapi-test-pod-name",
				},
				{
					Name:  "POD_NAME_TEST3",
					Value: "test2-dapi-test-pod-name-3",
				},
				{
					Name:  "LITERAL_TEST",
					Value: "literal-test-test-test",
				},
				{
					Name:  "TEST_SERVICE_HOST",
					Value: "1.2.3.3",
				},
				{
					Name:  "TEST_SERVICE_PORT",
					Value: "8083",
				},
				{
					Name:  "TEST_PORT",
					Value: "tcp://1.2.3.3:8083",
				},
				{
					Name:  "TEST_PORT_8083_TCP",
					Value: "tcp://1.2.3.3:8083",
				},
				{
					Name:  "TEST_PORT_8083_TCP_PROTO",
					Value: "tcp",
				},
				{
					Name:  "TEST_PORT_8083_TCP_PORT",
					Value: "8083",
				},
				{
					Name:  "TEST_PORT_8083_TCP_ADDR",
					Value: "1.2.3.3",
				},
				{
					Name:  "SERVICE_VAR_TEST",
					Value: "1.2.3.3:8083",
				},
				{
					Name:  "OUT_OF_ORDER_TEST",
					Value: "$(OUT_OF_ORDER_TARGET)",
				},
				{
					Name:  "OUT_OF_ORDER_TARGET",
					Value: "FOO",
				},
				{
					Name:  "TEST_UNDEFINED",
					Value: "$(UNDEFINED_VAR)",
				},
				{
					Name: "EMPTY_VAR",
				},
				{
					Name:  "EMPTY_TEST",
					Value: "foo-",
				},
			},
		},
	}
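
	// Run each case through makeEnvironmentVariables and compare the generated
	// env against the expectation. The service-derived entries above follow the
	// Docker-links naming scheme: a service "test" with IP 1.2.3.3 and port 8083
	// yields TEST_SERVICE_HOST/TEST_SERVICE_PORT plus TEST_PORT and the
	// TEST_PORT_8083_TCP_* family.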
	for _, tc := range testCases {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
		kl := testKubelet.kubelet
		kl.masterServiceNamespace = tc.masterServiceNs
		if tc.nilLister {
			kl.serviceLister = nil
		} else {
			kl.serviceLister = testServiceLister{services}
		}

		testPod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Namespace: tc.ns,
				Name:      "dapi-test-pod-name",
			},
			Spec: api.PodSpec{
				ServiceAccountName: "special",
				NodeName:           "node-name",
			},
		}
		podIP := "1.2.3.4"

		result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP)
		assert.NoError(t, err, "[%s]", tc.name)

		sort.Sort(envs(result))
		sort.Sort(envs(tc.expectedEnvs))
		assert.Equal(t, tc.expectedEnvs, result, "[%s] env entries", tc.name)
	}
}
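
// The helpers below construct api.ContainerStatus fixtures in the various
// container states; they keep the GetPhase test tables that follow compact.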
func waitingState(cName string) api.ContainerStatus {
	return api.ContainerStatus{
		Name: cName,
		State: api.ContainerState{
			Waiting: &api.ContainerStateWaiting{},
		},
	}
}

func waitingStateWithLastTermination(cName string) api.ContainerStatus {
	return api.ContainerStatus{
		Name: cName,
		State: api.ContainerState{
			Waiting: &api.ContainerStateWaiting{},
		},
		LastTerminationState: api.ContainerState{
			Terminated: &api.ContainerStateTerminated{
				ExitCode: 0,
			},
		},
	}
}

func runningState(cName string) api.ContainerStatus {
	return api.ContainerStatus{
		Name: cName,
		State: api.ContainerState{
			Running: &api.ContainerStateRunning{},
		},
	}
}

func stoppedState(cName string) api.ContainerStatus {
	return api.ContainerStatus{
		Name: cName,
		State: api.ContainerState{
			Terminated: &api.ContainerStateTerminated{},
		},
	}
}

func succeededState(cName string) api.ContainerStatus {
	return api.ContainerStatus{
		Name: cName,
		State: api.ContainerState{
			Terminated: &api.ContainerStateTerminated{
				ExitCode: 0,
			},
		},
	}
}

func failedState(cName string) api.ContainerStatus {
	return api.ContainerStatus{
		Name: cName,
		State: api.ContainerState{
			Terminated: &api.ContainerStateTerminated{
				ExitCode: -1,
			},
		},
	}
}
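
// TestPodPhaseWithRestartAlways verifies GetPhase under RestartPolicyAlways:
// terminated containers will be restarted, so a pod with stopped or
// crashlooping (waiting with a last termination) containers still reports Running.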
func TestPodPhaseWithRestartAlways(t *testing.T) {
	desiredState := api.PodSpec{
		NodeName: "machine",
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicyAlways,
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						runningState("containerB"),
					},
				},
			},
			api.PodRunning,
			"all running",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						stoppedState("containerA"),
						stoppedState("containerB"),
					},
				},
			},
			api.PodRunning,
			"all stopped with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						stoppedState("containerB"),
					},
				},
			},
			api.PodRunning,
			"mixed state #1 with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
					},
				},
			},
			api.PodPending,
			"mixed state #2 with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						waitingState("containerB"),
					},
				},
			},
			api.PodPending,
			"mixed state #3 with restart always",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						waitingStateWithLastTermination("containerB"),
					},
				},
			},
			api.PodRunning,
			"backoff crashloop container with restart always",
		},
	}
	for _, test := range tests {
		status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
		assert.Equal(t, test.status, status, "[test %s]", test.test)
	}
}
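
// TestPodPhaseWithRestartNever verifies GetPhase under RestartPolicyNever: the
// pod only reaches Succeeded or Failed once every container has terminated.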
func TestPodPhaseWithRestartNever(t *testing.T) {
	desiredState := api.PodSpec{
		NodeName: "machine",
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicyNever,
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						runningState("containerB"),
					},
				},
			},
			api.PodRunning,
			"all running with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						succeededState("containerA"),
						succeededState("containerB"),
					},
				},
			},
			api.PodSucceeded,
			"all succeeded with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						failedState("containerA"),
						failedState("containerB"),
					},
				},
			},
			api.PodFailed,
			"all failed with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						succeededState("containerB"),
					},
				},
			},
			api.PodRunning,
			"mixed state #1 with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
					},
				},
			},
			api.PodPending,
			"mixed state #2 with restart never",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						waitingState("containerB"),
					},
				},
			},
			api.PodPending,
			"mixed state #3 with restart never",
		},
	}
	for _, test := range tests {
		status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
		assert.Equal(t, test.status, status, "[test %s]", test.test)
	}
}
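
// TestPodPhaseWithRestartOnFailure verifies GetPhase under RestartPolicyOnFailure:
// containers that exit non-zero are restarted, so an all-failed pod reports Running.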
func TestPodPhaseWithRestartOnFailure(t *testing.T) {
	desiredState := api.PodSpec{
		NodeName: "machine",
		Containers: []api.Container{
			{Name: "containerA"},
			{Name: "containerB"},
		},
		RestartPolicy: api.RestartPolicyOnFailure,
	}

	tests := []struct {
		pod    *api.Pod
		status api.PodPhase
		test   string
	}{
		{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						runningState("containerB"),
					},
				},
			},
			api.PodRunning,
			"all running with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						succeededState("containerA"),
						succeededState("containerB"),
					},
				},
			},
			api.PodSucceeded,
			"all succeeded with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						failedState("containerA"),
						failedState("containerB"),
					},
				},
			},
			api.PodRunning,
			"all failed with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						succeededState("containerB"),
					},
				},
			},
			api.PodRunning,
			"mixed state #1 with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
					},
				},
			},
			api.PodPending,
			"mixed state #2 with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						waitingState("containerB"),
					},
				},
			},
			api.PodPending,
			"mixed state #3 with restart onfailure",
		},
		{
			&api.Pod{
				Spec: desiredState,
				Status: api.PodStatus{
					ContainerStatuses: []api.ContainerStatus{
						runningState("containerA"),
						waitingStateWithLastTermination("containerB"),
					},
				},
			},
			api.PodRunning,
			"backoff crashloop container with restart onfailure",
		},
	}
	for _, test := range tests {
		status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses)
		assert.Equal(t, test.status, status, "[test %s]", test.test)
	}
}
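
// TestExecInContainerNoSuchPod checks that ExecInContainer fails without ever
// invoking the command runner when the target pod is not in the runtime's pod list.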
func TestExecInContainerNoSuchPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner
	fakeRuntime.PodList = []*containertest.FakePod{}

	podName := "podFoo"
	podNamespace := "nsFoo"
	containerID := "containerFoo"
	err := kubelet.ExecInContainer(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
		"",
		containerID,
		[]string{"ls"},
		nil,
		nil,
		nil,
		false,
		nil,
	)
	require.Error(t, err)
	require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer")
}

func TestExecInContainerNoSuchContainer(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner

	podName := "podFoo"
	podNamespace := "nsFoo"
	containerID := "containerFoo"
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      podName,
			Namespace: podNamespace,
			Containers: []*kubecontainer.Container{
				{Name: "bar",
					ID: kubecontainer.ContainerID{Type: "test", ID: "barID"}},
			},
		}},
	}

	err := kubelet.ExecInContainer(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      podName,
			Namespace: podNamespace,
		}}),
		"",
		containerID,
		[]string{"ls"},
		nil,
		nil,
		nil,
		false,
		nil,
	)
	require.Error(t, err)
	require.True(t, fakeCommandRunner.ID.IsEmpty(), "Unexpected invocation of runner.ExecInContainer")
}
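
// fakeReadWriteCloser is a no-op io.ReadWriteCloser used as a stand-in for the
// exec and port-forward streams in the tests below.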
type fakeReadWriteCloser struct{}

func (f *fakeReadWriteCloser) Write(data []byte) (int, error) {
	return 0, nil
}

func (f *fakeReadWriteCloser) Read(data []byte) (int, error) {
	return 0, nil
}

func (f *fakeReadWriteCloser) Close() error {
	return nil
}

func TestExecInContainer(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner

	podName := "podFoo"
	podNamespace := "nsFoo"
	containerID := "containerFoo"
	command := []string{"ls"}
	stdin := &bytes.Buffer{}
	stdout := &fakeReadWriteCloser{}
	stderr := &fakeReadWriteCloser{}
	tty := true
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      podName,
			Namespace: podNamespace,
			Containers: []*kubecontainer.Container{
				{Name: containerID,
					ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
				},
			},
		}},
	}

	err := kubelet.ExecInContainer(
		kubecontainer.GetPodFullName(podWithUidNameNs("12345678", podName, podNamespace)),
		"",
		containerID,
		command,
		stdin,
		stdout,
		stderr,
		tty,
		nil,
	)
	require.NoError(t, err)
	require.Equal(t, fakeCommandRunner.ID.ID, containerID, "ID")
	require.Equal(t, fakeCommandRunner.Cmd, command, "Command")
	require.Equal(t, fakeCommandRunner.Stdin, stdin, "Stdin")
	require.Equal(t, fakeCommandRunner.Stdout, stdout, "Stdout")
	require.Equal(t, fakeCommandRunner.Stderr, stderr, "Stderr")
	require.Equal(t, fakeCommandRunner.TTY, tty, "TTY")
}
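
// TestPortForwardNoSuchPod checks that PortForward fails without ever invoking
// the command runner when the target pod is not in the runtime's pod list.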
func TestPortForwardNoSuchPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	fakeRuntime.PodList = []*containertest.FakePod{}
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner

	podName := "podFoo"
	podNamespace := "nsFoo"
	var port uint16 = 5000

	err := kubelet.PortForward(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
		"",
		port,
		nil,
	)
	require.Error(t, err)
	require.True(t, fakeCommandRunner.ID.IsEmpty(), "unexpected invocation of runner.PortForward")
}

func TestPortForward(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime

	podName := "podFoo"
	podNamespace := "nsFoo"
	podID := types.UID("12345678")
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        podID,
			Name:      podName,
			Namespace: podNamespace,
			Containers: []*kubecontainer.Container{
				{
					Name: "foo",
					ID:   kubecontainer.ContainerID{Type: "test", ID: "containerFoo"},
				},
			},
		}},
	}
	fakeCommandRunner := fakeContainerCommandRunner{}
	kubelet.runner = &fakeCommandRunner

	var port uint16 = 5000
	stream := &fakeReadWriteCloser{}
	err := kubelet.PortForward(
		kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      podName,
			Namespace: podNamespace,
		}}),
		"",
		port,
		stream,
	)
	require.NoError(t, err)
	require.Equal(t, fakeCommandRunner.PodID, podID, "Pod ID")
	require.Equal(t, fakeCommandRunner.Port, port, "Port")
	require.Equal(t, fakeCommandRunner.Stream, stream, "stream")
}

// Tests that host port conflicts are detected correctly.
func TestGetHostPortConflicts(t *testing.T) {
	pods := []*api.Pod{
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
		{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}},
	}
	// Pods should not cause any conflict.
	assert.False(t, hasHostPortConflicts(pods), "Should not have port conflicts")

	expected := &api.Pod{
		Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
	}
	// The new pod should cause conflict and be reported.
	pods = append(pods, expected)
	assert.True(t, hasHostPortConflicts(pods), "Should have port conflicts")
}

// Tests that we handle port conflicts correctly by setting the failed status in status map.
func TestHandlePortConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kl.nodeLister = testNodeLister{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the Pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
	pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
	require.Equal(t, api.PodFailed, status.Phase)

	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
	require.Equal(t, api.PodPending, status.Phase)
}

// Tests that we handle host name conflicts correctly by setting the failed status in status map.
func TestHandleHostNameConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kl.nodeLister = testNodeLister{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	// default NodeName in test is 127.0.0.1
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "notfittingpod", "foo", api.PodSpec{NodeName: "127.0.0.2"}),
		podWithUidNameNsSpec("987654321", "fittingpod", "foo", api.PodSpec{NodeName: "127.0.0.1"}),
	}

	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
	require.Equal(t, api.PodFailed, status.Phase)

	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
	require.Equal(t, api.PodPending, status.Phase)
}

// Tests that a pod whose node selector does not match the node's labels is
// rejected by setting the failed status in the status map.
func TestHandleNodeSelector(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	nodes := []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}
	kl.nodeInfo = testNodeInfo{nodes: nodes}
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "podA", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
		podWithUidNameNsSpec("987654321", "podB", "foo", api.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
	}
	// The first pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
	require.Equal(t, api.PodFailed, status.Phase)

	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
	require.Equal(t, api.PodPending, status.Phase)
}

// Tests that a pod whose memory request exceeds the node's allocatable memory
// is rejected by setting the failed status in the status map.
func TestHandleMemExceeded(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	nodes := []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
			Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: api.ResourceList{
				api.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
				api.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
				api.ResourcePods:   *resource.NewQuantity(40, resource.DecimalSI),
			}}},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}
	kl.nodeInfo = testNodeInfo{nodes: nodes}
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	spec := api.PodSpec{NodeName: kl.nodeName,
		Containers: []api.Container{{Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				"memory": resource.MustParse("90"),
			},
		}}}}
	pods := []*api.Pod{
		podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the Pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = unversioned.NewTime(time.Now())
	pods[0].CreationTimestamp = unversioned.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// notfittingPod should be Failed
	status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", notfittingPod.UID)
	require.Equal(t, api.PodFailed, status.Phase)

	// fittingPod should be Pending
	status, found = kl.statusManager.GetPodStatus(fittingPod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
	require.Equal(t, api.PodPending, status.Phase)
}

// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	versionInfo := &cadvisorapi.VersionInfo{
		KernelVersion:      "3.16.0-0.bpo.4-amd64",
		ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
		DockerVersion:      "1.5.0",
	}
	testKubelet.fakeCadvisor.On("VersionInfo").Return(versionInfo, nil)

	kl := testKubelet.kubelet
	pods := []*api.Pod{
		{ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
		{ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
	}
	podToTest := pods[1]
	// Run once to populate the status map.
	kl.HandlePodAdditions(pods)
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
		t.Fatalf("expected to have status cached for pod2")
	}
	// Sync with empty pods so that the entry in status map will be removed.
	kl.podManager.SetPods([]*api.Pod{})
	kl.HandlePodCleanups()
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
		t.Fatalf("expected to not have status cached for pod2")
	}
}
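
// TestValidateContainerLogStatus verifies which container states permit log
// access: running or terminated containers do, waiting containers (including
// image pull failures) do not, and "previous" logs additionally require a
// recorded last termination state.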
func TestValidateContainerLogStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	containerName := "x"
	testCases := []struct {
		statuses []api.ContainerStatus
		success  bool // whether getting logs for the container should succeed.
		pSuccess bool // whether getting logs for the previous container should succeed.
	}{
		{
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Running: &api.ContainerStateRunning{},
					},
					LastTerminationState: api.ContainerState{
						Terminated: &api.ContainerStateTerminated{},
					},
				},
			},
			success:  true,
			pSuccess: true,
		},
		{
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Running: &api.ContainerStateRunning{},
					},
				},
			},
			success:  true,
			pSuccess: false,
		},
		{
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Terminated: &api.ContainerStateTerminated{},
					},
				},
			},
			success:  true,
			pSuccess: false,
		},
		{
			statuses: []api.ContainerStatus{
				{
					Name: containerName,
					State: api.ContainerState{
						Waiting: &api.ContainerStateWaiting{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []api.ContainerStatus{
				{
					Name:  containerName,
					State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []api.ContainerStatus{
				{
					Name:  containerName,
					State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
				},
			},
			success:  false,
			pSuccess: false,
		},
	}

	for i, tc := range testCases {
		// Access the log of the most recent container
		previous := false
		podStatus := &api.PodStatus{ContainerStatuses: tc.statuses}
		_, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
		if !tc.success {
			assert.Error(t, err, "[case %d] error", i)
		} else {
			assert.NoError(t, err, "[case %d] error", i)
		}
		// Access the log of the previous, terminated container
		previous = true
		_, err = kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
		if !tc.pSuccess {
			assert.Error(t, err, "[case %d] error", i)
		} else {
			assert.NoError(t, err, "[case %d] error", i)
		}
		// Access the log of a container that's not in the pod
		_, err = kubelet.validateContainerLogStatus("podName", podStatus, "blah", false)
		assert.Error(t, err, "[case %d] invalid container name should cause an error", i)
	}
}

// updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along
// with the mock FsInfo values added to Cadvisor should make the kubelet report that it has
// sufficient disk space or it is out of disk, depending on the capacity, availability and
// threshold values.
func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, rootCap, dockerCap, rootAvail, dockerAvail uint64, rootThreshold, dockerThreshold int) error {
	dockerimagesFsInfo := cadvisorapiv2.FsInfo{Capacity: dockerCap * mb, Available: dockerAvail * mb}
	rootFsInfo := cadvisorapiv2.FsInfo{Capacity: rootCap * mb, Available: rootAvail * mb}
	mockCadvisor.On("ImagesFsInfo").Return(dockerimagesFsInfo, nil)
	mockCadvisor.On("RootFsInfo").Return(rootFsInfo, nil)

	dsp := DiskSpacePolicy{DockerFreeDiskMB: dockerThreshold, RootFreeDiskMB: rootThreshold}
	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, dsp)
	if err != nil {
		return err
	}
	kubelet.diskSpaceManager = diskSpaceManager
	return nil
}
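
// TestCreateMirrorPod verifies that syncing a static pod (config source "file")
// creates exactly one matching mirror pod, for both create and update sync types.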
func TestCreateMirrorPod(t *testing.T) {
	for _, updateType := range []kubetypes.SyncPodType{kubetypes.SyncPodCreate, kubetypes.SyncPodUpdate} {
		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
		testKubelet.fakeCadvisor.On("Start").Return(nil)
		testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
		testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
		testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
		testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

		kl := testKubelet.kubelet
		manager := testKubelet.fakeMirrorClient
		pod := podWithUidNameNs("12345678", "bar", "foo")
		pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
		pods := []*api.Pod{pod}
		kl.podManager.SetPods(pods)
		err := kl.syncPod(syncPodOptions{
			pod:        pod,
			podStatus:  &kubecontainer.PodStatus{},
			updateType: updateType,
		})
		assert.NoError(t, err)
		podFullName := kubecontainer.GetPodFullName(pod)
		assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
		assert.Equal(t, 1, manager.NumOfPods(), "Expected only 1 mirror pod %q, got %+v", podFullName, manager.GetPods())
	}
}
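
// TestDeleteOutdatedMirrorPod verifies that a mirror pod whose spec has drifted
// from its static pod is deleted and recreated on sync.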
func TestDeleteOutdatedMirrorPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kl := testKubelet.kubelet
	manager := testKubelet.fakeMirrorClient
	pod := podWithUidNameNsSpec("12345678", "foo", "ns", api.PodSpec{
		Containers: []api.Container{
			{Name: "1234", Image: "foo"},
		},
	})
	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"

	// Mirror pod has an outdated spec.
	mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", api.PodSpec{
		Containers: []api.Container{
			{Name: "1234", Image: "bar"},
		},
	})
	mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
	mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"

	pods := []*api.Pod{pod, mirrorPod}
	kl.podManager.SetPods(pods)
	err := kl.syncPod(syncPodOptions{
		pod:        pod,
		mirrorPod:  mirrorPod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.NoError(t, err)
	name := kubecontainer.GetPodFullName(pod)
	creates, deletes := manager.GetCounts(name)
	if creates != 1 || deletes != 1 {
		t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
	}
}
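
// TestDeleteOrphanedMirrorPods verifies that mirror pods without a matching
// static pod are deleted during pod cleanup.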
func TestDeleteOrphanedMirrorPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kl := testKubelet.kubelet
	manager := testKubelet.fakeMirrorClient
	orphanPods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "pod1",
				Namespace: "ns",
				Annotations: map[string]string{
					kubetypes.ConfigSourceAnnotationKey: "api",
					kubetypes.ConfigMirrorAnnotationKey: "mirror",
				},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345679",
				Name:      "pod2",
				Namespace: "ns",
				Annotations: map[string]string{
					kubetypes.ConfigSourceAnnotationKey: "api",
					kubetypes.ConfigMirrorAnnotationKey: "mirror",
				},
			},
		},
	}

	kl.podManager.SetPods(orphanPods)
	// Sync with an empty pod list to delete all mirror pods.
	kl.HandlePodCleanups()
	assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
	for _, pod := range orphanPods {
		name := kubecontainer.GetPodFullName(pod)
		creates, deletes := manager.GetCounts(name)
		if creates != 0 || deletes != 1 {
			t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
		}
	}
}
func TestGetContainerInfoForMirrorPods(t *testing.T) {
|
|
|
|
// pods contain one static and one mirror pod with the same name but
|
|
|
|
// different UIDs.
|
2015-04-03 22:51:50 +00:00
|
|
|
pods := []*api.Pod{
|
2015-03-20 20:55:26 +00:00
|
|
|
{
|
|
|
|
ObjectMeta: api.ObjectMeta{
|
|
|
|
UID: "1234",
|
|
|
|
Name: "qux",
|
|
|
|
Namespace: "ns",
|
|
|
|
Annotations: map[string]string{
|
2015-10-09 17:24:31 +00:00
|
|
|
kubetypes.ConfigSourceAnnotationKey: "file",
|
2015-03-20 20:55:26 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Spec: api.PodSpec{
|
|
|
|
Containers: []api.Container{
|
|
|
|
{Name: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
ObjectMeta: api.ObjectMeta{
|
|
|
|
UID: "5678",
|
|
|
|
Name: "qux",
|
|
|
|
Namespace: "ns",
|
|
|
|
Annotations: map[string]string{
|
2015-10-09 17:24:31 +00:00
|
|
|
kubetypes.ConfigSourceAnnotationKey: "api",
|
|
|
|
kubetypes.ConfigMirrorAnnotationKey: "mirror",
|
2015-03-20 20:55:26 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
Spec: api.PodSpec{
|
|
|
|
Containers: []api.Container{
|
|
|
|
{Name: "foo"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
containerID := "ab2cdf"
|
|
|
|
containerPath := fmt.Sprintf("/docker/%v", containerID)
|
2015-10-16 03:00:28 +00:00
|
|
|
containerInfo := cadvisorapi.ContainerInfo{
|
|
|
|
ContainerReference: cadvisorapi.ContainerReference{
|
2015-03-20 20:55:26 +00:00
|
|
|
Name: containerPath,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2016-05-30 02:22:22 +00:00
|
|
|
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
|
2015-05-27 19:53:39 +00:00
|
|
|
fakeRuntime := testKubelet.fakeRuntime
|
2015-03-20 20:55:26 +00:00
|
|
|
mockCadvisor := testKubelet.fakeCadvisor
|
2015-10-16 03:00:28 +00:00
|
|
|
cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
|
2015-03-20 20:55:26 +00:00
|
|
|
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
|
2015-05-27 19:53:39 +00:00
|
|
|
kubelet := testKubelet.kubelet
|
2015-03-20 20:55:26 +00:00
|
|
|
|
2016-06-20 22:14:08 +00:00
|
|
|
fakeRuntime.PodList = []*containertest.FakePod{
|
|
|
|
{Pod: &kubecontainer.Pod{
|
2015-06-02 01:14:08 +00:00
|
|
|
ID: "1234",
|
|
|
|
Name: "qux",
|
|
|
|
Namespace: "ns",
|
|
|
|
Containers: []*kubecontainer.Container{
|
|
|
|
{
|
|
|
|
Name: "foo",
|
2016-03-23 23:45:24 +00:00
|
|
|
ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
|
2015-06-02 01:14:08 +00:00
|
|
|
},
|
|
|
|
},
|
2016-06-20 22:14:08 +00:00
|
|
|
}},
|
2015-03-20 20:55:26 +00:00
|
|
|
}
|
|
|
|
|
2015-03-21 00:22:02 +00:00
|
|
|
kubelet.podManager.SetPods(pods)
|
2015-03-20 20:55:26 +00:00
|
|
|
// Use the mirror pod UID to retrieve the stats.
|
|
|
|
stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
|
2016-09-01 19:25:01 +00:00
|
|
|
assert.NoError(t, err)
|
|
|
|
require.NotNil(t, stats)
|
2015-03-20 20:55:26 +00:00
|
|
|
mockCadvisor.AssertExpectations(t)
|
|
|
|
}
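
// TestHostNetworkAllowed verifies that syncPod accepts a host-network pod when
// its config source appears in HostNetworkSources.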
func TestHostNetworkAllowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
		},
	})
	pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
		Containers: []api.Container{
			{Name: "foo"},
		},
		SecurityContext: &api.PodSecurityContext{
			HostNetwork: true,
		},
	})
	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource

	kubelet.podManager.SetPods([]*api.Pod{pod})
	err := kubelet.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.NoError(t, err, "expected pod infra creation to succeed")
}
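
// TestHostNetworkDisallowed verifies that syncPod rejects a host-network pod
// when no config source is allowed to use the host network.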
func TestHostNetworkDisallowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{},
		},
	})
	pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
		Containers: []api.Container{
			{Name: "foo"},
		},
		SecurityContext: &api.PodSecurityContext{
			HostNetwork: true,
		},
	})
	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource

	err := kubelet.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.Error(t, err, "expected pod infra creation to fail")
}
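
// TestPrivilegedContainerAllowed verifies that syncPod accepts a pod with a
// privileged container when AllowPrivileged is enabled.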
func TestPrivilegedContainerAllowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{
		AllowPrivileged: true,
	})
	privileged := true
	pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
		Containers: []api.Container{
			{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
		},
	})

	kubelet.podManager.SetPods([]*api.Pod{pod})
	err := kubelet.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.NoError(t, err, "expected pod infra creation to succeed")
}
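
// TestPrivilegedContainerDisallowed verifies that syncPod rejects a pod with a
// privileged container when AllowPrivileged is disabled.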
func TestPrivilegedContainerDisallowed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet

	capabilities.SetForTests(capabilities.Capabilities{
		AllowPrivileged: false,
	})
	privileged := true
	pod := podWithUidNameNsSpec("12345678", "foo", "new", api.PodSpec{
		Containers: []api.Container{
			{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
		},
	})

	err := kubelet.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodUpdate,
	})
	assert.Error(t, err, "expected pod infra creation to fail")
}
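
// TestFilterOutTerminatedPods verifies that pods in the Failed or Succeeded
// phase are filtered out while the remaining pods are kept.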
func TestFilterOutTerminatedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	pods[0].Status.Phase = api.PodFailed
	pods[1].Status.Phase = api.PodSucceeded
	pods[2].Status.Phase = api.PodRunning
	pods[3].Status.Phase = api.PodPending

	expected := []*api.Pod{pods[2], pods[3], pods[4]}
	kubelet.podManager.SetPods(pods)
	actual := kubelet.filterOutTerminatedPods(pods)
	assert.Equal(t, expected, actual)
}
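
// TestMakePortMappings verifies port mapping generation: unnamed ports get a
// "<container>-<protocol>:<port>" name, named ports get "<container>-<name>",
// and entries that would produce a duplicate name are dropped.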
func TestMakePortMappings(t *testing.T) {
	port := func(name string, protocol api.Protocol, containerPort, hostPort int32, ip string) api.ContainerPort {
		return api.ContainerPort{
			Name:          name,
			Protocol:      protocol,
			ContainerPort: containerPort,
			HostPort:      hostPort,
			HostIP:        ip,
		}
	}
	portMapping := func(name string, protocol api.Protocol, containerPort, hostPort int, ip string) kubecontainer.PortMapping {
		return kubecontainer.PortMapping{
			Name:          name,
			Protocol:      protocol,
			ContainerPort: containerPort,
			HostPort:      hostPort,
			HostIP:        ip,
		}
	}

	tests := []struct {
		container            *api.Container
		expectedPortMappings []kubecontainer.PortMapping
	}{
		{
			&api.Container{
				Name: "fooContainer",
				Ports: []api.ContainerPort{
					port("", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
					port("", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
					port("foo", api.ProtocolUDP, 555, 5555, ""),
					// Duplicated port name; should be ignored.
					port("foo", api.ProtocolUDP, 888, 8888, ""),
					// Duplicate of the generated name for TCP:80 above; should be ignored.
					port("", api.ProtocolTCP, 80, 8888, ""),
				},
			},
			[]kubecontainer.PortMapping{
				portMapping("fooContainer-TCP:80", api.ProtocolTCP, 80, 8080, "127.0.0.1"),
				portMapping("fooContainer-TCP:443", api.ProtocolTCP, 443, 4343, "192.168.0.1"),
				portMapping("fooContainer-foo", api.ProtocolUDP, 555, 5555, ""),
			},
		},
	}

	for i, tt := range tests {
		actual := makePortMappings(tt.container)
		assert.Equal(t, tt.expectedPortMappings, actual, "[%d]", i)
	}
}
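
// TestSyncPodsSetStatusToFailedForPodsThatRunTooLong verifies that a pod that
// has exceeded its ActiveDeadlineSeconds is marked Failed on the next sync.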
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	fakeRuntime := testKubelet.fakeRuntime
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	kubelet := testKubelet.kubelet

	now := unversioned.Now()
	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
	exceededActiveDeadlineSeconds := int64(30)

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "foo"},
				},
				ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
			},
			Status: api.PodStatus{
				StartTime: &startTime,
			},
		},
	}

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "bar",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "foo"},
			},
		}},
	}

	// Let the pod worker set the status to failed after this sync.
	kubelet.HandlePodUpdates(pods)
	status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
	assert.True(t, found, "expected to find status for pod %q", pods[0].UID)
	assert.Equal(t, api.PodFailed, status.Phase)
}
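
// TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed verifies that a pod
// still within its ActiveDeadlineSeconds is not marked Failed by a sync.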
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	fakeRuntime := testKubelet.fakeRuntime
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	kubelet := testKubelet.kubelet

	now := unversioned.Now()
	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
	notYetActiveDeadlineSeconds := int64(300)

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{Name: "foo"},
				},
				ActiveDeadlineSeconds: &notYetActiveDeadlineSeconds,
			},
			Status: api.PodStatus{
				StartTime: &startTime,
			},
		},
	}

	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: &kubecontainer.Pod{
			ID:        "12345678",
			Name:      "bar",
			Namespace: "new",
			Containers: []*kubecontainer.Container{
				{Name: "foo"},
			},
		}},
	}

	kubelet.podManager.SetPods(pods)
	kubelet.HandlePodUpdates(pods)
	status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
	assert.True(t, found, "expected to find status for pod %q", pods[0].UID)
	assert.NotEqual(t, api.PodFailed, status.Phase)
}
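
// podWithUidNameNs and podWithUidNameNsSpec build minimal api.Pod fixtures with
// the given identifiers (and, for the latter, the given spec).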
func podWithUidNameNs(uid types.UID, name, namespace string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:         uid,
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
	}
}

func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec api.PodSpec) *api.Pod {
	pod := podWithUidNameNs(uid, name, namespace)
	pod.Spec = spec
	return pod
}
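
// TestDeletePodDirsForDeletedPods verifies that the pod directory of a pod
// removed from the pod manager is deleted during cleanup, while the directories
// of remaining pods are preserved.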
func TestDeletePodDirsForDeletedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kl := testKubelet.kubelet
	pods := []*api.Pod{
		podWithUidNameNs("12345678", "pod1", "ns"),
		podWithUidNameNs("12345679", "pod2", "ns"),
	}

	kl.podManager.SetPods(pods)
	// Sync to create pod directories.
	kl.HandlePodSyncs(kl.podManager.GetPods())
	for i := range pods {
		assert.True(t, dirExists(kl.getPodDir(pods[i].UID)), "Expected directory to exist for pod %d", i)
	}

	// Pod 1 has been deleted and no longer exists.
	kl.podManager.SetPods([]*api.Pod{pods[0]})
	kl.HandlePodCleanups()
	assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
	assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
}
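
// syncAndVerifyPodDir syncs the given pods, runs pod cleanup, and asserts that
// the pod directory of every pod in podsToCheck does (or does not) exist.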
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) {
	kl := testKubelet.kubelet

	kl.podManager.SetPods(pods)
	kl.HandlePodSyncs(pods)
	kl.HandlePodCleanups()
	for i, pod := range podsToCheck {
		exist := dirExists(kl.getPodDir(pod.UID))
		assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
	}
}

func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kl := testKubelet.kubelet
	pods := []*api.Pod{
		podWithUidNameNs("12345678", "pod1", "ns"),
		podWithUidNameNs("12345679", "pod2", "ns"),
		podWithUidNameNs("12345680", "pod3", "ns"),
	}

	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
	// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
	// deleted.
	kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed})
	kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded})
	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}

func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("Start").Return(nil)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	runningPod := &kubecontainer.Pod{
		ID:        "12345678",
		Name:      "pod1",
		Namespace: "ns",
	}
	apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)

	// Sync once to create pod directory; confirm that the pod directory has
	// already been created.
	pods := []*api.Pod{apiPod}
	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)

	// Pretend the pod is deleted from apiserver, but is still active on the node.
	// The pod directory should not be removed.
	pods = []*api.Pod{}
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{runningPod, ""}}
	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)

	// The pod is deleted and also not active on the node. The pod directory
	// should be removed.
	pods = []*api.Pod{}
	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
	syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
}
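
// TestGetPodsToSync verifies that getPodsToSync returns pods whose work queue
// entries are due as well as pods that have exceeded their active deadline.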
func TestGetPodsToSync(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	clock := testKubelet.fakeClock
	pods := newTestPods(5)

	exceededActiveDeadlineSeconds := int64(30)
	notYetActiveDeadlineSeconds := int64(120)
	startTime := unversioned.NewTime(clock.Now())
	pods[0].Status.StartTime = &startTime
	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
	pods[1].Status.StartTime = &startTime
	pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
	pods[2].Status.StartTime = &startTime
	pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds

	kubelet.podManager.SetPods(pods)
	kubelet.workQueue.Enqueue(pods[2].UID, 0)
	kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
	kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)

	clock.Step(1 * time.Minute)

	expected := []*api.Pod{pods[2], pods[3], pods[0]}
	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}
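
// TestGenerateAPIPodStatusWithSortedContainers verifies that the container
// statuses in the generated API pod status are sorted by container name,
// regardless of the order reported by the runtime.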
func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	numContainers := 10
	expectedOrder := []string{}
	cStatuses := []*kubecontainer.ContainerStatus{}
	specContainerList := []api.Container{}
	for i := 0; i < numContainers; i++ {
		id := fmt.Sprintf("%v", i)
		containerName := fmt.Sprintf("%vcontainer", id)
		expectedOrder = append(expectedOrder, containerName)
		cStatus := &kubecontainer.ContainerStatus{
			ID:   kubecontainer.BuildContainerID("test", id),
			Name: containerName,
		}
		// Rearrange container statuses
		if i%2 == 0 {
			cStatuses = append(cStatuses, cStatus)
		} else {
			cStatuses = append([]*kubecontainer.ContainerStatus{cStatus}, cStatuses...)
		}
		specContainerList = append(specContainerList, api.Container{Name: containerName})
	}
	pod := podWithUidNameNs("uid1", "foo", "test")
	pod.Spec = api.PodSpec{
		Containers: specContainerList,
	}

	status := &kubecontainer.PodStatus{
		ID:                pod.UID,
		Name:              pod.Name,
		Namespace:         pod.Namespace,
		ContainerStatuses: cStatuses,
	}
	for i := 0; i < 5; i++ {
		apiStatus := kubelet.generateAPIPodStatus(pod, status)
		for i, c := range apiStatus.ContainerStatuses {
			if expectedOrder[i] != c.Name {
				t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
			}
		}
	}
}

func verifyContainerStatuses(t *testing.T, statuses []api.ContainerStatus, state, lastTerminationState map[string]api.ContainerState, message string) {
	for _, s := range statuses {
		assert.Equal(t, s.State, state[s.Name], "%s: state", message)
		assert.Equal(t, s.LastTerminationState, lastTerminationState[s.Name], "%s: last terminated state", message)
	}
}

// Test generateAPIPodStatus with different reason cache entries and old API pod statuses.
func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
	// The following waiting reason and message are generated in convertStatusToAPIStatus()
	startWaitingReason := "ContainerCreating"
	initWaitingReason := "PodInitializing"
	testTimestamp := time.Unix(123456789, 987654321)
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	pod := podWithUidNameNs("12345678", "foo", "new")
	pod.Spec = api.PodSpec{RestartPolicy: api.RestartPolicyOnFailure}

	podStatus := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	tests := []struct {
		containers    []api.Container
		statuses      []*kubecontainer.ContainerStatus
		reasons       map[string]error
		oldStatuses   []api.ContainerStatus
		expectedState map[string]api.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState            map[string]api.ContainerState
		expectedLastTerminationState map[string]api.ContainerState
	}{
		// For a container with no historical record, State should be Waiting and
		// LastTerminationState should be retrieved from the old apiserver status.
		{
			containers: []api.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
			statuses:   []*kubecontainer.ContainerStatus{},
			reasons:    map[string]error{},
			oldStatuses: []api.ContainerStatus{{
				Name:                 "with-old-record",
				LastTerminationState: api.ContainerState{Terminated: &api.ContainerStateTerminated{}},
			}},
			expectedState: map[string]api.ContainerState{
				"without-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
				"with-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: startWaitingReason,
				}},
			},
			expectedInitState: map[string]api.ContainerState{
				"without-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
				"with-old-record": {Waiting: &api.ContainerStateWaiting{
					Reason: initWaitingReason,
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"with-old-record": {Terminated: &api.ContainerStateTerminated{}},
			},
		},
		// For a running container, State should be Running and LastTerminationState
		// should be retrieved from the latest terminated status.
		{
			containers: []api.Container{{Name: "running"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name:      "running",
					State:     kubecontainer.ContainerStateRunning,
					StartedAt: testTimestamp,
				},
				{
					Name:     "running",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
			},
			reasons:     map[string]error{},
			oldStatuses: []api.ContainerStatus{},
			expectedState: map[string]api.ContainerState{
				"running": {Running: &api.ContainerStateRunning{
					StartedAt: unversioned.NewTime(testTimestamp),
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"running": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		// For a terminated container:
		// * If there is no recent start error record, State should be Terminated and
		//   LastTerminationState should be retrieved from the second latest terminated status;
		// * If there is a recent start error record, State should be Waiting and
		//   LastTerminationState should be retrieved from the latest terminated status;
		// * If ExitCode == 0 and the restart policy is RestartPolicyOnFailure, the container
		//   shouldn't be restarted. Whether or not there is a recent start error record,
		//   State should be Terminated and LastTerminationState should be retrieved from
		//   the second latest terminated status.
		{
			containers: []api.Container{{Name: "without-reason"}, {Name: "with-reason"}},
			statuses: []*kubecontainer.ContainerStatus{
				{
					Name:     "without-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 1,
				},
				{
					Name:     "with-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 2,
				},
				{
					Name:     "without-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 3,
				},
				{
					Name:     "with-reason",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 4,
				},
				{
					Name:     "succeed",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 0,
				},
				{
					Name:     "succeed",
					State:    kubecontainer.ContainerStateExited,
					ExitCode: 5,
				},
			},
			reasons:     map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
			oldStatuses: []api.ContainerStatus{},
			expectedState: map[string]api.ContainerState{
				"without-reason": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"without-reason": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    3,
					ContainerID: emptyContainerID,
				}},
				"with-reason": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    5,
					ContainerID: emptyContainerID,
				}},
			},
		},
	}

	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.Containers = test.containers
		pod.Status.ContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
	}

	// Everything should be the same for init containers
	for i, test := range tests {
		kubelet.reasonCache = NewReasonCache()
		for n, e := range test.reasons {
			kubelet.reasonCache.add(pod.UID, n, e, "")
		}
		pod.Spec.InitContainers = test.containers
		pod.Status.InitContainerStatuses = test.oldStatuses
		podStatus.ContainerStatuses = test.statuses
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState := test.expectedState
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
	}
}

// Test generateAPIPodStatus with different restart policies.
func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
	testErrorReason := fmt.Errorf("test-error")
	emptyContainerID := (&kubecontainer.ContainerID{}).String()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	testKubelet.fakeCadvisor.On("VersionInfo").Return(&cadvisorapi.VersionInfo{}, nil)
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	kubelet := testKubelet.kubelet
	pod := podWithUidNameNs("12345678", "foo", "new")
	containers := []api.Container{{Name: "succeed"}, {Name: "failed"}}
	podStatus := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				Name:     "succeed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 0,
			},
			{
				Name:     "failed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 1,
			},
			{
				Name:     "succeed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 2,
			},
			{
				Name:     "failed",
				State:    kubecontainer.ContainerStateExited,
				ExitCode: 3,
			},
		},
	}
	kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
	kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
	for c, test := range []struct {
		restartPolicy                api.RestartPolicy
		expectedState                map[string]api.ContainerState
		expectedLastTerminationState map[string]api.ContainerState
		// Only set expectedInitState when it is different from expectedState
		expectedInitState map[string]api.ContainerState
		// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
		expectedInitLastTerminationState map[string]api.ContainerState
	}{
		{
			restartPolicy: api.RestartPolicyNever,
			expectedState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    3,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: api.RestartPolicyOnFailure,
			expectedState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
		{
			restartPolicy: api.RestartPolicyAlways,
			expectedState: map[string]api.ContainerState{
				"succeed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
				"failed":  {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
			// If the init container is terminated with exit code 0, it won't be restarted even when the
			// restart policy is RestartAlways.
			expectedInitState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    0,
					ContainerID: emptyContainerID,
				}},
				"failed": {Waiting: &api.ContainerStateWaiting{Reason: testErrorReason.Error()}},
			},
			expectedInitLastTerminationState: map[string]api.ContainerState{
				"succeed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    2,
					ContainerID: emptyContainerID,
				}},
				"failed": {Terminated: &api.ContainerStateTerminated{
					ExitCode:    1,
					ContainerID: emptyContainerID,
				}},
			},
		},
	} {
		pod.Spec.RestartPolicy = test.restartPolicy
		// Test normal containers
		pod.Spec.Containers = containers
		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus)
		expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
		verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
		pod.Spec.Containers = nil

		// Test init containers
		pod.Spec.InitContainers = containers
		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus)
		if test.expectedInitState != nil {
			expectedState = test.expectedInitState
		}
		if test.expectedInitLastTerminationState != nil {
			expectedLastTerminationState = test.expectedInitLastTerminationState
		}
		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
		pod.Spec.InitContainers = nil
	}
}

// testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
type testPodAdmitHandler struct {
	// list of pods to reject.
	podsToReject []*api.Pod
}

// Admit rejects all pods in the podsToReject list with a matching UID.
func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
	for _, podToReject := range a.podsToReject {
		if podToReject.UID == attrs.Pod.UID {
			return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
		}
	}
	return lifecycle.PodAdmitResult{Admit: true}
}

// TestHandlePodAdditionsInvokesPodAdmitHandlers verifies that the kubelet
// invokes an admission handler during HandlePodAdditions.
func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	kl.nodeLister = testNodeLister{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
		{
			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}
	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

	pods := []*api.Pod{
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "123456789",
				Name:      "podA",
				Namespace: "foo",
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:       "987654321",
				Name:      "podB",
				Namespace: "foo",
			},
		},
	}
	podToReject := pods[0]
	podToAdmit := pods[1]
	podsToReject := []*api.Pod{podToReject}

	kl.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})

	kl.HandlePodAdditions(pods)
	// Check pod status stored in the status map.
	// podToReject should be Failed
	status, found := kl.statusManager.GetPodStatus(podToReject.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", podToReject.UID)
	require.Equal(t, api.PodFailed, status.Phase)

	// podToAdmit should be Pending
	status, found = kl.statusManager.GetPodStatus(podToAdmit.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", podToAdmit.UID)
	require.Equal(t, api.PodPending, status.Phase)
}

// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
	// list of pods to sync
	podsToSync []*api.Pod
}

// ShouldSync evaluates if the pod should be synced from the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *api.Pod) bool {
	for _, podToSync := range a.podsToSync {
		if podToSync.UID == pod.UID {
			return true
		}
	}
	return false
}

// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that the get pods to sync routine invokes the handler.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	expected := []*api.Pod{pods[0]}
	kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected})
	kubelet.podManager.SetPods(pods)

	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}

// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
	// list of pods to evict.
	podsToEvict []*api.Pod
	// the reason for the eviction
	reason string
	// the message for the eviction
	message string
}

// ShouldEvict evaluates if the pod should be evicted from the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse {
	for _, podToEvict := range a.podsToEvict {
		if podToEvict.UID == pod.UID {
			return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
		}
	}
	return lifecycle.ShouldEvictResponse{Evict: false}
}

// TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that generateAPIPodStatus
// invokes the pod sync handlers and reports the resulting eviction in the status.
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kubelet := testKubelet.kubelet
	pod := newTestPods(1)[0]
	podsToEvict := []*api.Pod{pod}
	kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	apiStatus := kubelet.generateAPIPodStatus(pod, status)
	require.Equal(t, api.PodFailed, apiStatus.Phase)
	require.Equal(t, "Evicted", apiStatus.Reason)
	require.Equal(t, "because", apiStatus.Message)
}
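
// TestSyncPodKillPod verifies that a SyncPodKill update kills the pod and
// records the pod status returned by the supplied PodStatusFunc.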
func TestSyncPodKillPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	kl := testKubelet.kubelet
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "foo",
		},
	}
	pods := []*api.Pod{pod}
	kl.podManager.SetPods(pods)
	gracePeriodOverride := int64(0)
	err := kl.syncPod(syncPodOptions{
		pod:        pod,
		podStatus:  &kubecontainer.PodStatus{},
		updateType: kubetypes.SyncPodKill,
		killPodOptions: &KillPodOptions{
			PodStatusFunc: func(p *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus {
				return api.PodStatus{
					Phase:   api.PodFailed,
					Reason:  "reason",
					Message: "message",
				}
			},
			PodTerminationGracePeriodSecondsOverride: &gracePeriodOverride,
		},
	})
	require.NoError(t, err)
	// Check pod status stored in the status map.
	status, found := kl.statusManager.GetPodStatus(pod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
	require.Equal(t, api.PodFailed, status.Phase)
}
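
// waitForVolumeUnmount polls with exponential backoff until the volume manager
// reports no mounted volumes for the given pod.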
func waitForVolumeUnmount(
	volumeManager kubeletvolume.VolumeManager,
	pod *api.Pod) error {
	var podVolumes kubecontainer.VolumeMap
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify volumes unmounted
			podVolumes = volumeManager.GetMountedVolumesForPod(
				volumehelper.GetUniquePodName(pod))

			if len(podVolumes) != 0 {
				return false, nil
			}

			return true, nil
		},
	)

	if err != nil {
		return fmt.Errorf(
			"expected volumes to be unmounted, but some are still mounted: %#v", podVolumes)
	}

	return nil
}

func waitForVolumeDetach(
	volumeName api.UniqueVolumeName,
	volumeManager kubeletvolume.VolumeManager) error {
	err := retryWithExponentialBackOff(
		time.Duration(50*time.Millisecond),
		func() (bool, error) {
			// Verify the volume is no longer attached
			volumeAttached := volumeManager.VolumeIsAttached(volumeName)
			return !volumeAttached, nil
		},
	)

	if err != nil {
		return fmt.Errorf(
			"expected volume %q to be detached, but it is still attached", volumeName)
	}

	return nil
}
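
// retryWithExponentialBackOff retries fn up to 6 times, tripling the wait
// between attempts starting from initialDuration.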
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}
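
// simulateVolumeInUseUpdate periodically marks the given volume as reported in
// use until stopCh is closed.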
func simulateVolumeInUseUpdate(
	volumeName api.UniqueVolumeName,
	stopCh <-chan struct{},
	volumeManager kubeletvolume.VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]api.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}
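
// runVolumeManager starts the kubelet's volume manager in a goroutine and
// returns a stop channel that terminates it when closed.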
func runVolumeManager(kubelet *Kubelet) chan struct{} {
	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(kubelet.sourcesReady, stopCh)
	return stopCh
}

// Sort pods by UID.
type podsByUID []*api.Pod

func (p podsByUID) Len() int           { return len(p) }
func (p podsByUID) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }