Kubelet: replace DockerManager with the Runtime interface
This change switches the kubelet to using the Runtime interface. To make that possible, it moves the Prober instantiation into DockerManager. Note that most of the tests in kubelet_test.go still need to be migrated to dockertools; for now, we use a type assertion to convert the Runtime interface back to DockerManager in most tests.
parent e87d735304
commit 1ad4dd7803
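The test migration leans on Go's type assertion to recover the concrete Docker-backed manager from the generic runtime interface. The sketch below illustrates that pattern with simplified stand-in types; it is a self-contained toy, not the real Kubernetes code (the actual Runtime interface and DockerManager live in pkg/kubelet/container and pkg/kubelet/dockertools):

	package main

	import "fmt"

	// Runtime stands in for the kubecontainer.Runtime interface the
	// kubelet now programs against.
	type Runtime interface {
		Version() (string, error)
	}

	// DockerManager stands in for dockertools.DockerManager, the only
	// concrete Runtime implementation at this point.
	type DockerManager struct {
		PodInfraContainerImage string
	}

	func (dm *DockerManager) Version() (string, error) { return "docker", nil }

	// Kubelet holds the runtime as an interface, not a concrete type.
	type Kubelet struct {
		containerRuntime Runtime
	}

	func main() {
		kl := &Kubelet{containerRuntime: &DockerManager{}}

		// Tests that still need Docker-specific fields assert the
		// interface back to the concrete type, as the migrated tests
		// in this diff do with
		// kubelet.containerRuntime.(*dockertools.DockerManager).
		dm := kl.containerRuntime.(*DockerManager)
		dm.PodInfraContainerImage = "custom_image_name"
		fmt.Println(dm.PodInfraContainerImage)
	}

The assertion panics if the runtime is ever not Docker-backed, which is tolerable in tests that are slated to move into dockertools anyway.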
@@ -28,7 +28,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider"
 	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-	kubeletProber "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 	docker "github.com/fsouza/go-dockerclient"
@@ -553,7 +552,7 @@ func TestFindContainersByPod(t *testing.T) {
 	}
 	fakeClient := &FakeDockerClient{}
 	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
-	containerManager := NewDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, &kubeletProber.FakeProber{}, nil, nil, nil)
+	containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
 	for i, test := range tests {
 		fakeClient.ContainerList = test.containerList
 		fakeClient.ExitedContainerList = test.exitedContainerList

@@ -0,0 +1,47 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dockertools
+
+import (
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
+	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
+	kubeletTypes "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/types"
+)
+
+func NewFakeDockerManager(
+	client DockerInterface,
+	recorder record.EventRecorder,
+	readinessManager *kubecontainer.ReadinessManager,
+	containerRefManager *kubecontainer.RefManager,
+	podInfraContainerImage string,
+	qps float32,
+	burst int,
+	containerLogsDir string,
+	osInterface kubecontainer.OSInterface,
+	networkPlugin network.NetworkPlugin,
+	generator kubecontainer.RunContainerOptionsGenerator,
+	httpClient kubeletTypes.HttpGetter,
+	runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
+
+	dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, podInfraContainerImage, qps,
+		burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks)
+	dm.Puller = &FakeDockerPuller{}
+	dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
+	return dm
+}

@@ -91,10 +91,8 @@ type DockerManager struct {
 	// Network plugin.
 	networkPlugin network.NetworkPlugin
 
-	// TODO(vmarmol): Make this non-public when we remove the circular dependency
-	// with prober.
 	// Health check prober.
-	Prober prober.Prober
+	prober prober.Prober
 
 	// Generator of runtime container options.
 	generator kubecontainer.RunContainerOptionsGenerator
@@ -117,7 +115,6 @@ func NewDockerManager(
 	containerLogsDir string,
 	osInterface kubecontainer.OSInterface,
 	networkPlugin network.NetworkPlugin,
-	prober prober.Prober,
 	generator kubecontainer.RunContainerOptionsGenerator,
 	httpClient kubeletTypes.HttpGetter,
 	runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
@@ -164,11 +161,13 @@ func NewDockerManager(
 		dockerRoot:       dockerRoot,
 		containerLogsDir: containerLogsDir,
 		networkPlugin:    networkPlugin,
-		Prober:           prober,
+		prober:           nil,
 		generator:        generator,
 		runtimeHooks:     runtimeHooks,
 	}
 	dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
+	dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder)
+
 	return dm
 }
 
@@ -1305,7 +1304,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
 			continue
 		}
 
-		result, err := dm.Prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
+		result, err := dm.prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
 		if err != nil {
 			// TODO(vmarmol): examine this logic.
 			glog.V(2).Infof("probe no-error: %q", container.Name)

@@ -17,28 +17,32 @@ limitations under the License.
 package dockertools
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 	"sort"
 	"testing"
 	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
 	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
+	kubeprober "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	uexec "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
 	"github.com/fsouza/go-dockerclient"
 )
 
-func NewFakeDockerManager() (*DockerManager, *FakeDockerClient) {
+func newTestDockerManager() (*DockerManager, *FakeDockerClient) {
 	fakeDocker := &FakeDockerClient{Errors: make(map[string]error), RemovedImages: util.StringSet{}}
 	fakeRecorder := &record.FakeRecorder{}
 	readinessManager := kubecontainer.NewReadinessManager()
 	containerRefManager := kubecontainer.NewRefManager()
 	networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
 
-	dockerManager := NewDockerManager(
+	dockerManager := NewFakeDockerManager(
 		fakeDocker,
 		fakeRecorder,
 		readinessManager,
@@ -49,7 +53,6 @@ func NewFakeDockerManager() (*DockerManager, *FakeDockerClient) {
 		networkPlugin,
-		nil,
 		nil,
 		nil,
 		nil)
 
 	return dockerManager, fakeDocker
@@ -142,7 +145,7 @@ func verifyPods(a, b []*kubecontainer.Pod) bool {
 }
 
 func TestGetPods(t *testing.T) {
-	manager, fakeDocker := NewFakeDockerManager()
+	manager, fakeDocker := newTestDockerManager()
 	dockerContainers := []docker.APIContainers{
 		{
 			ID: "1111",
@@ -195,7 +198,7 @@ func TestGetPods(t *testing.T) {
 }
 
 func TestListImages(t *testing.T) {
-	manager, fakeDocker := NewFakeDockerManager()
+	manager, fakeDocker := newTestDockerManager()
 	dockerImages := []docker.APIImages{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
 	expected := util.NewStringSet([]string{"1111", "2222", "3333"}...)
 
@@ -250,7 +253,7 @@ func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {
 }
 
 func TestKillContainerInPod(t *testing.T) {
-	manager, fakeDocker := NewFakeDockerManager()
+	manager, fakeDocker := newTestDockerManager()
 
 	pod := &api.Pod{
 		ObjectMeta: api.ObjectMeta{
@@ -296,7 +299,7 @@ func TestKillContainerInPod(t *testing.T) {
 }
 
 func TestKillContainerInPodWithError(t *testing.T) {
-	manager, fakeDocker := NewFakeDockerManager()
+	manager, fakeDocker := newTestDockerManager()
 
 	pod := &api.Pod{
 		ObjectMeta: api.ObjectMeta{
@@ -338,3 +341,281 @@ func TestKillContainerInPodWithError(t *testing.T) {
 		t.Errorf("exepcted container entry ID '%v' to be found. states: %+v", containerToSpare.ID, ready)
 	}
 }
+
+type fakeExecProber struct {
+	result probe.Result
+	err    error
+}
+
+func (p fakeExecProber) Probe(_ uexec.Cmd) (probe.Result, error) {
+	return p.result, p.err
+}
+
+func replaceProber(dm *DockerManager, result probe.Result, err error) {
+	fakeExec := fakeExecProber{
+		result: result,
+		err:    err,
+	}
+
+	dm.prober = kubeprober.NewTestProber(fakeExec, dm.readinessManager, dm.containerRefManager, &record.FakeRecorder{})
+	return
+}
+
+// TestProbeContainer tests the functionality of probeContainer.
+// Test cases are:
+//
+// No probe.
+// Only LivenessProbe.
+// Only ReadinessProbe.
+// Both probes.
+//
+// Also, for each probe, there will be several cases covering whether the initial
+// delay has passed, whether the probe handler will return Success, Failure,
+// Unknown or error.
+//
+func TestProbeContainer(t *testing.T) {
+	manager, _ := newTestDockerManager()
+	dc := &docker.APIContainers{
+		ID:      "foobar",
+		Created: time.Now().Unix(),
+	}
+	tests := []struct {
+		testContainer     api.Container
+		expectError       bool
+		expectedResult    probe.Result
+		expectedReadiness bool
+	}{
+		// No probes.
+		{
+			testContainer:     api.Container{},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		// Only LivenessProbe.
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
+			},
+			expectedResult:    probe.Unknown,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Failure,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Unknown,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectError:       true,
+			expectedResult:    probe.Unknown,
+			expectedReadiness: false,
+		},
+		// Only ReadinessProbe.
+		{
+			testContainer: api.Container{
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				ReadinessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		{
+			testContainer: api.Container{
+				ReadinessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		{
+			testContainer: api.Container{
+				ReadinessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		{
+			testContainer: api.Container{
+				ReadinessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectError:       false,
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+		// Both LivenessProbe and ReadinessProbe.
+		{
+			testContainer: api.Container{
+				LivenessProbe:  &api.Probe{InitialDelaySeconds: 100},
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe:  &api.Probe{InitialDelaySeconds: 100},
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe:  &api.Probe{InitialDelaySeconds: -100},
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
+			},
+			expectedResult:    probe.Unknown,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe:  &api.Probe{InitialDelaySeconds: -100},
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
+			},
+			expectedResult:    probe.Unknown,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
+			},
+			expectedResult:    probe.Unknown,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
+			},
+			expectedResult:    probe.Failure,
+			expectedReadiness: false,
+		},
+		{
+			testContainer: api.Container{
+				LivenessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+				ReadinessProbe: &api.Probe{
+					InitialDelaySeconds: -100,
+					Handler: api.Handler{
+						Exec: &api.ExecAction{},
+					},
+				},
+			},
+			expectedResult:    probe.Success,
+			expectedReadiness: true,
+		},
+	}
+
+	for _, test := range tests {
+		if test.expectError {
+			replaceProber(manager, test.expectedResult, errors.New("error"))
+		} else {
+			replaceProber(manager, test.expectedResult, nil)
+		}
+		result, err := manager.prober.Probe(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
+		if test.expectError && err == nil {
+			t.Error("Expected error but did no error was returned.")
+		}
+		if !test.expectError && err != nil {
+			t.Errorf("Expected error but got: %v", err)
+		}
+		if test.expectedResult != result {
+			t.Errorf("Expected result was %v but probeContainer() returned %v", test.expectedResult, result)
+		}
+		if test.expectedReadiness != manager.readinessManager.GetReadiness(dc.ID) {
+			t.Errorf("Expected readiness was %v but probeContainer() set %v", test.expectedReadiness, manager.readinessManager.GetReadiness(dc.ID))
+		}
+	}
+}

@@ -242,15 +242,11 @@ func NewMainKubelet(
 		klet.networkPlugin = plug
 	}
 
-	// TODO(vmarmol,yjhong): Use container runtime.
+	// Initialize the runtime.
 	switch containerRuntime {
 	case "docker":
-		containerManager := dockertools.NewDockerManager(
+		// Only supported one for now, continue.
+	default:
+		return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime)
+	}
+	klet.containerRuntime = dockertools.NewDockerManager(
 		dockerClient,
 		recorder,
 		readinessManager,
@@ -261,20 +257,17 @@ func NewMainKubelet(
 		containerLogsDir,
 		osInterface,
 		klet.networkPlugin,
-		nil,
 		klet,
 		klet.httpClient,
 		newKubeletRuntimeHooks(recorder))
-	klet.runner = containerManager
-	klet.containerManager = containerManager
-	default:
-		return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime)
-	}
+
+	klet.runner = klet.containerRuntime
 	klet.podManager = newBasicPodManager(klet.kubeClient)
-	klet.prober = prober.New(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder)
 
-	// TODO(vmarmol): Remove when the circular dependency is removed :(
-	containerManager.Prober = klet.prober
-
-	runtimeCache, err := kubecontainer.NewRuntimeCache(containerManager)
+	runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
 	if err != nil {
 		return nil, err
 	}
@@ -352,9 +345,6 @@ type Kubelet struct {
 	// Network plugin.
 	networkPlugin network.NetworkPlugin
 
-	// Healthy check prober.
-	prober prober.Prober
-
 	// Container readiness state manager.
 	readinessManager *kubecontainer.ReadinessManager
 
@@ -386,8 +376,8 @@ type Kubelet struct {
 	// Reference to this node.
 	nodeRef *api.ObjectReference
 
-	// Manage containers.
-	containerManager *dockertools.DockerManager
+	// Container runtime.
+	containerRuntime kubecontainer.Runtime
 
 	// nodeStatusUpdateFrequency specifies how often kubelet posts node status to master.
 	// Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
@@ -876,7 +866,7 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string,
 
 // Kill all running containers in a pod (includes the pod infra container).
 func (kl *Kubelet) killPod(pod kubecontainer.Pod) error {
-	return kl.containerManager.KillPod(pod)
+	return kl.containerRuntime.KillPod(pod)
 }
 
 type empty struct{}
@@ -954,7 +944,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
 		return err
 	}
 
-	err = kl.containerManager.SyncPod(pod, runningPod, podStatus)
+	err = kl.containerRuntime.SyncPod(pod, runningPod, podStatus)
 	if err != nil {
 		return err
 	}
@@ -1146,7 +1136,7 @@ func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metri
 	// in the cache. We need to bypass the cach to get the latest set of
 	// running pods to clean up the volumes.
 	// TODO: Evaluate the performance impact of bypassing the runtime cache.
-	runningPods, err = kl.containerManager.GetPods(false)
+	runningPods, err = kl.containerRuntime.GetPods(false)
 	if err != nil {
 		glog.Errorf("Error listing containers: %#v", err)
 		return err
@@ -1343,10 +1333,10 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
 
 // Returns the container runtime version for this Kubelet.
 func (kl *Kubelet) GetContainerRuntimeVersion() (kubecontainer.Version, error) {
-	if kl.containerManager == nil {
+	if kl.containerRuntime == nil {
 		return nil, fmt.Errorf("no container runtime")
 	}
-	return kl.containerManager.Version()
+	return kl.containerRuntime.Version()
 }
 
 func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error {
@@ -1386,7 +1376,7 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName, tail stri
 		// waiting state.
 		return err
 	}
-	return kl.containerManager.GetContainerLogs(containerID, tail, follow, stdout, stderr)
+	return kl.containerRuntime.GetContainerLogs(containerID, tail, follow, stdout, stderr)
 }
 
 // GetHostname Returns the hostname as the kubelet sees it.
@@ -1656,7 +1646,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
 	glog.V(3).Infof("Generating status for %q", podFullName)
 
 	spec := &pod.Spec
-	podStatus, err := kl.containerManager.GetPodStatus(pod)
+	podStatus, err := kl.containerRuntime.GetPodStatus(pod)
 
 	if err != nil {
 		// Error handling
@@ -1706,7 +1696,7 @@ func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
 // It returns nil if not found.
 // TODO(yifan): Move this to runtime once the runtime interface has been all implemented.
 func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
-	pods, err := kl.containerManager.GetPods(false)
+	pods, err := kl.containerRuntime.GetPods(false)
 	if err != nil {
 		return nil, err
 	}
@@ -1758,7 +1748,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
 		return fmt.Errorf("no runner specified.")
 	}
 
-	pods, err := kl.containerManager.GetPods(false)
+	pods, err := kl.containerRuntime.GetPods(false)
 	if err != nil {
 		return err
 	}

@@ -45,11 +45,8 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/metrics"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
-	uexec "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/version"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
 	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/host_path"
@@ -109,8 +106,9 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 	kubelet.podManager = podManager
 	kubelet.containerRefManager = kubecontainer.NewRefManager()
 	runtimeHooks := newKubeletRuntimeHooks(kubelet.recorder)
-	kubelet.containerManager = dockertools.NewDockerManager(fakeDocker, fakeRecorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, nil, kubelet, &fakeHTTP{}, runtimeHooks)
-	kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerManager)
+
+	kubelet.containerRuntime = dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, kubelet, &fakeHTTP{}, runtimeHooks)
+	kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerRuntime)
 	kubelet.podWorkers = newPodWorkers(
 		kubelet.runtimeCache,
 		func(pod *api.Pod, mirrorPod *api.Pod, runningPod container.Pod) error {
@@ -119,9 +117,6 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 			return err
 		},
 		fakeRecorder)
-	kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{}
-	kubelet.prober = prober.New(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder)
-	kubelet.containerManager.Prober = kubelet.prober
 	kubelet.volumeManager = newVolumeManager()
 	return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient}
 }
@@ -504,7 +499,10 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	fakeDocker := testKubelet.fakeDocker
 	waitGroup := testKubelet.waitGroup
-	kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
+	// TODO: Move this test to dockertools so that we don't have to do the hacky
+	// type assertion here.
+	dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+	dm.PodInfraContainerImage = "custom_image_name"
 	fakeDocker.ContainerList = []docker.APIContainers{}
 	pods := []*api.Pod{
 		{
|
@ -565,9 +563,12 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
|
|||
kubelet := testKubelet.kubelet
|
||||
fakeDocker := testKubelet.fakeDocker
|
||||
waitGroup := testKubelet.waitGroup
|
||||
puller := kubelet.containerManager.Puller.(*dockertools.FakeDockerPuller)
|
||||
// TODO: Move this test to dockertools so that we don't have to do the hacky
|
||||
// type assertion here.
|
||||
dm := kubelet.containerRuntime.(*dockertools.DockerManager)
|
||||
puller := dm.Puller.(*dockertools.FakeDockerPuller)
|
||||
puller.HasImages = []string{}
|
||||
kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
|
||||
dm.PodInfraContainerImage = "custom_image_name"
|
||||
fakeDocker.ContainerList = []docker.APIContainers{}
|
||||
pods := []*api.Pod{
|
||||
{
|
||||
|
@ -695,10 +696,10 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
|
|||
waitGroup := testKubelet.waitGroup
|
||||
fakeHttp := fakeHTTP{}
|
||||
|
||||
// Simulate HTTP failure. Re-create the containerManager to inject the failure.
|
||||
// Simulate HTTP failure. Re-create the containerRuntime to inject the failure.
|
||||
kubelet.httpClient = &fakeHttp
|
||||
runtimeHooks := newKubeletRuntimeHooks(kubelet.recorder)
|
||||
kubelet.containerManager = dockertools.NewDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, nil, kubelet, kubelet.httpClient, runtimeHooks)
|
||||
kubelet.containerRuntime = dockertools.NewFakeDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, kubelet, kubelet.httpClient, runtimeHooks)
|
||||
|
||||
pods := []*api.Pod{
|
||||
{
|
||||
|
@@ -752,7 +753,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
 		// Get pod status.
 		"list", "inspect_container", "inspect_image",
 		// Check the pod infra container.
-		"inspect_container", "inspect_image",
+		"inspect_container",
 		// Create container.
 		"create", "start",
 		// Get pod status.
@@ -1645,12 +1646,12 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
 	fakeDocker := testKubelet.fakeDocker
 	waitGroup := testKubelet.waitGroup
 
-	// Simulate HTTP failure. Re-create the containerManager to inject the failure.
+	// Simulate HTTP failure. Re-create the containerRuntime to inject the failure.
 	kubelet.httpClient = &fakeHTTP{
 		err: fmt.Errorf("test error"),
 	}
 	runtimeHooks := newKubeletRuntimeHooks(kubelet.recorder)
-	kubelet.containerManager = dockertools.NewDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, nil, kubelet, kubelet.httpClient, runtimeHooks)
+	kubelet.containerRuntime = dockertools.NewFakeDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, kubelet, kubelet.httpClient, runtimeHooks)
 
 	pods := []*api.Pod{
 		{
@@ -1704,7 +1705,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
 		// Get pod status.
 		"list", "inspect_container", "inspect_image",
 		// Check the pod infra container.
-		"inspect_container", "inspect_image",
+		"inspect_container",
 		// Create the container.
 		"create", "start",
 		// Kill the container since event handler fails.
@@ -1731,9 +1732,12 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	fakeDocker := testKubelet.fakeDocker
 	waitGroup := testKubelet.waitGroup
-	puller := kubelet.containerManager.Puller.(*dockertools.FakeDockerPuller)
+	// TODO: Move this test to dockertools so that we don't have to do the hacky
+	// type assertion here.
+	dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+	puller := dm.Puller.(*dockertools.FakeDockerPuller)
 	puller.HasImages = []string{"existing_one", "want:latest"}
-	kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
+	dm.PodInfraContainerImage = "custom_image_name"
 	fakeDocker.ContainerList = []docker.APIContainers{}
 
 	pods := []*api.Pod{
@@ -2850,7 +2854,10 @@ func TestPortForward(t *testing.T) {
 
 	podInfraContainerImage := "POD"
 	infraContainerID := "infra"
-	kubelet.containerManager.PodInfraContainerImage = podInfraContainerImage
+	// TODO: Move this test to dockertools so that we don't have to do the hacky
+	// type assertion here.
+	dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+	dm.PodInfraContainerImage = podInfraContainerImage
 
 	fakeDocker.ContainerList = []docker.APIContainers{
 		{
@@ -3984,7 +3991,10 @@ func TestGetPodCreationFailureReason(t *testing.T) {
 	pods := []*api.Pod{pod}
 	kubelet.podManager.SetPods(pods)
 	kubelet.volumeManager.SetVolumes(pod.UID, volumeMap{})
-	_, err := kubelet.containerManager.RunContainer(pod, &pod.Spec.Containers[0], "", "")
+	// TODO: Move this test to dockertools so that we don't have to do the hacky
+	// type assertion here.
+	dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+	_, err := dm.RunContainer(pod, &pod.Spec.Containers[0], "", "")
 	if err == nil {
 		t.Errorf("expected error, found nil")
 	}
@@ -4107,287 +4117,3 @@ func TestFilterOutTerminatedPods(t *testing.T) {
 		t.Errorf("expected %#v, got %#v", expected, actual)
 	}
 }
-
-type fakeExecProber struct {
-	result probe.Result
-	err    error
-}
-
-func (p fakeExecProber) Probe(_ uexec.Cmd) (probe.Result, error) {
-	return p.result, p.err
-}
-
-func makeTestKubelet(result probe.Result, err error) *Kubelet {
-	kl := &Kubelet{
-		readinessManager:    kubecontainer.NewReadinessManager(),
-		containerRefManager: kubecontainer.NewRefManager(),
-	}
-
-	// TODO(vmarmol): Fix
-	fakeExec := fakeExecProber{
-		result: result,
-		err:    err,
-	}
-	kl.prober = prober.NewTestProber(fakeExec, kl.readinessManager, kl.containerRefManager, &record.FakeRecorder{})
-	return kl
-}
-
-// TestProbeContainer tests the functionality of probeContainer.
-// Test cases are:
-//
-// No probe.
-// Only LivenessProbe.
-// Only ReadinessProbe.
-// Both probes.
-//
-// Also, for each probe, there will be several cases covering whether the initial
-// delay has passed, whether the probe handler will return Success, Failure,
-// Unknown or error.
-//
-func TestProbeContainer(t *testing.T) {
-	dc := &docker.APIContainers{
-		ID:      "foobar",
-		Created: time.Now().Unix(),
-	}
-	tests := []struct {
-		testContainer     api.Container
-		expectError       bool
-		expectedResult    probe.Result
-		expectedReadiness bool
-	}{
-		// No probes.
-		{
-			testContainer:     api.Container{},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		// Only LivenessProbe.
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
-			},
-			expectedResult:    probe.Unknown,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Failure,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Unknown,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectError:       true,
-			expectedResult:    probe.Unknown,
-			expectedReadiness: false,
-		},
-		// Only ReadinessProbe.
-		{
-			testContainer: api.Container{
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				ReadinessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		{
-			testContainer: api.Container{
-				ReadinessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		{
-			testContainer: api.Container{
-				ReadinessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		{
-			testContainer: api.Container{
-				ReadinessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectError:       false,
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-		// Both LivenessProbe and ReadinessProbe.
-		{
-			testContainer: api.Container{
-				LivenessProbe:  &api.Probe{InitialDelaySeconds: 100},
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe:  &api.Probe{InitialDelaySeconds: 100},
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe:  &api.Probe{InitialDelaySeconds: -100},
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
-			},
-			expectedResult:    probe.Unknown,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe:  &api.Probe{InitialDelaySeconds: -100},
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
-			},
-			expectedResult:    probe.Unknown,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
-			},
-			expectedResult:    probe.Unknown,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-				ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
-			},
-			expectedResult:    probe.Failure,
-			expectedReadiness: false,
-		},
-		{
-			testContainer: api.Container{
-				LivenessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-				ReadinessProbe: &api.Probe{
-					InitialDelaySeconds: -100,
-					Handler: api.Handler{
-						Exec: &api.ExecAction{},
-					},
-				},
-			},
-			expectedResult:    probe.Success,
-			expectedReadiness: true,
-		},
-	}
-
-	for _, test := range tests {
-		var kl *Kubelet
-
-		if test.expectError {
-			kl = makeTestKubelet(test.expectedResult, errors.New("error"))
-		} else {
-			kl = makeTestKubelet(test.expectedResult, nil)
-		}
-		result, err := kl.prober.Probe(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
-		if test.expectError && err == nil {
-			t.Error("Expected error but did no error was returned.")
-		}
-		if !test.expectError && err != nil {
-			t.Errorf("Expected error but got: %v", err)
-		}
-		if test.expectedResult != result {
-			t.Errorf("Expected result was %v but probeContainer() returned %v", test.expectedResult, result)
-		}
-		if test.expectedReadiness != kl.readinessManager.GetReadiness(dc.ID) {
-			t.Errorf("Expected readiness was %v but probeContainer() set %v", test.expectedReadiness, kl.readinessManager.GetReadiness(dc.ID))
-		}
-	}
-}

@@ -26,7 +26,6 @@ import (
 	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-	kubeletProber "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 )
 
@@ -43,7 +42,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]string) {
 	fakeDocker := &dockertools.FakeDockerClient{}
 	fakeRecorder := &record.FakeRecorder{}
 	np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
-	dockerManager := dockertools.NewDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, &kubeletProber.FakeProber{}, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
+	dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
 	fakeRuntimeCache := kubecontainer.NewFakeRuntimeCache(dockerManager)
 
 	lock := sync.Mutex{}

|
@ -88,7 +88,7 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
|
|||
delay := retryDelay
|
||||
retry := 0
|
||||
for {
|
||||
pods, err := kl.containerManager.GetPods(false)
|
||||
pods, err := kl.containerRuntime.GetPods(false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get kubelet pods: %v", err)
|
||||
}
|
||||
|
@@ -120,7 +120,7 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
 
 // isPodRunning returns true if all containers of a manifest are running.
 func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) {
-	status, err := kl.containerManager.GetPodStatus(pod)
+	status, err := kl.containerRuntime.GetPodStatus(pod)
 	if err != nil {
 		glog.Infof("Failed to get the status of pod %q: %v", kubecontainer.GetPodFullName(pod), err)
 		return false, err

|
@ -28,7 +28,6 @@ import (
|
|||
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
|
||||
kubeletProber "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
|
||||
docker "github.com/fsouza/go-dockerclient"
|
||||
cadvisorApi "github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
@ -149,7 +148,7 @@ func TestRunOnce(t *testing.T) {
|
|||
t: t,
|
||||
}
|
||||
|
||||
kb.containerManager = dockertools.NewDockerManager(
|
||||
kb.containerRuntime = dockertools.NewFakeDockerManager(
|
||||
kb.dockerClient,
|
||||
kb.recorder,
|
||||
kb.readinessManager,
|
||||
|
@@ -160,11 +159,9 @@ func TestRunOnce(t *testing.T) {
 		"",
 		kubecontainer.FakeOS{},
 		kb.networkPlugin,
-		&kubeletProber.FakeProber{},
 		kb,
 		nil,
 		newKubeletRuntimeHooks(kb.recorder))
-	kb.containerManager.Puller = &dockertools.FakeDockerPuller{}
 
 	pods := []*api.Pod{
 		{