Kubelet: replace DockerManager with the Runtime interface

This change switches the kubelet over to the Runtime interface. To make that
possible, the Prober instantiation moves into DockerManager.

Note that most of the tests in kubelet_test.go need to be migrated to
dockertools. For now, we use a type assertion to convert the Runtime interface
back to DockerManager in most tests.
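
For illustration, the temporary workaround in the tests looks roughly like this
(a minimal sketch; kubelet, containerRuntime, Puller, and PodInfraContainerImage
are the names used in the test changes below):

    // The kubelet now only holds a kubecontainer.Runtime, so tests that need
    // Docker-specific knobs assert it back to the concrete manager type.
    dm := kubelet.containerRuntime.(*dockertools.DockerManager)
    dm.PodInfraContainerImage = "custom_image_name"
    puller := dm.Puller.(*dockertools.FakeDockerPuller)
    puller.HasImages = []string{}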
Yu-Ju Hong 2015-05-01 15:25:11 -07:00
parent e87d735304
commit 1ad4dd7803
9 changed files with 404 additions and 366 deletions


@@ -28,7 +28,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider"
     kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-    kubeletProber "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     docker "github.com/fsouza/go-dockerclient"
@@ -553,7 +552,7 @@ func TestFindContainersByPod(t *testing.T) {
     }
     fakeClient := &FakeDockerClient{}
     np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
-    containerManager := NewDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, &kubeletProber.FakeProber{}, nil, nil, nil)
+    containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil)
     for i, test := range tests {
         fakeClient.ContainerList = test.containerList
         fakeClient.ExitedContainerList = test.exitedContainerList


@@ -0,0 +1,47 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dockertools

import (
    "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
    kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
    kubeletTypes "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/types"
)

func NewFakeDockerManager(
    client DockerInterface,
    recorder record.EventRecorder,
    readinessManager *kubecontainer.ReadinessManager,
    containerRefManager *kubecontainer.RefManager,
    podInfraContainerImage string,
    qps float32,
    burst int,
    containerLogsDir string,
    osInterface kubecontainer.OSInterface,
    networkPlugin network.NetworkPlugin,
    generator kubecontainer.RunContainerOptionsGenerator,
    httpClient kubeletTypes.HttpGetter,
    runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {

    dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, podInfraContainerImage, qps,
        burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, runtimeHooks)
    dm.Puller = &FakeDockerPuller{}
    dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
    return dm
}
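
A rough usage sketch of the new constructor, mirroring the arguments the updated
tests pass (see newTestDockerManager below; the variable names here are
illustrative):

    // Builds a DockerManager backed by fakes: a FakeDockerPuller and a prober
    // wired to the supplied readiness/ref managers, so unit tests never touch
    // a real Docker daemon.
    dockerManager := NewFakeDockerManager(
        &FakeDockerClient{},                 // client
        &record.FakeRecorder{},              // recorder
        kubecontainer.NewReadinessManager(),
        kubecontainer.NewRefManager(),
        PodInfraContainerImage,
        0, 0, "",                            // qps, burst, containerLogsDir
        kubecontainer.FakeOS{},
        networkPlugin,                       // e.g. from network.InitNetworkPlugin
        nil, nil, nil)                       // generator, httpClient, runtimeHooks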


@@ -91,10 +91,8 @@ type DockerManager struct {
     // Network plugin.
     networkPlugin network.NetworkPlugin

-    // TODO(vmarmol): Make this non-public when we remove the circular dependency
-    // with prober.
     // Health check prober.
-    Prober prober.Prober
+    prober prober.Prober

     // Generator of runtime container options.
     generator kubecontainer.RunContainerOptionsGenerator
@@ -117,7 +115,6 @@ func NewDockerManager(
     containerLogsDir string,
     osInterface kubecontainer.OSInterface,
     networkPlugin network.NetworkPlugin,
-    prober prober.Prober,
     generator kubecontainer.RunContainerOptionsGenerator,
     httpClient kubeletTypes.HttpGetter,
     runtimeHooks kubecontainer.RuntimeHooks) *DockerManager {
@@ -164,11 +161,13 @@ func NewDockerManager(
         dockerRoot:       dockerRoot,
         containerLogsDir: containerLogsDir,
         networkPlugin:    networkPlugin,
-        Prober:           prober,
+        prober:           nil,
         generator:        generator,
         runtimeHooks:     runtimeHooks,
     }
     dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm)
+    dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder)
+
     return dm
 }
@@ -1305,7 +1304,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, runningPod kub
             continue
         }
-        result, err := dm.Prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
+        result, err := dm.prober.Probe(pod, podStatus, container, string(c.ID), c.Created)
         if err != nil {
             // TODO(vmarmol): examine this logic.
             glog.V(2).Infof("probe no-error: %q", container.Name)


@@ -17,28 +17,32 @@ limitations under the License.
 package dockertools

 import (
+    "errors"
     "fmt"
     "reflect"
     "sort"
     "testing"
+    "time"

     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
     kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
+    kubeprober "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
+    "github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+    uexec "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
     "github.com/fsouza/go-dockerclient"
 )

-func NewFakeDockerManager() (*DockerManager, *FakeDockerClient) {
+func newTestDockerManager() (*DockerManager, *FakeDockerClient) {
     fakeDocker := &FakeDockerClient{Errors: make(map[string]error), RemovedImages: util.StringSet{}}
     fakeRecorder := &record.FakeRecorder{}
     readinessManager := kubecontainer.NewReadinessManager()
     containerRefManager := kubecontainer.NewRefManager()
     networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
-    dockerManager := NewDockerManager(
+    dockerManager := NewFakeDockerManager(
         fakeDocker,
         fakeRecorder,
         readinessManager,
@@ -49,7 +53,6 @@ func NewFakeDockerManager() (*DockerManager, *FakeDockerClient) {
         networkPlugin,
         nil,
         nil,
-        nil,
         nil)

     return dockerManager, fakeDocker
@@ -142,7 +145,7 @@ func verifyPods(a, b []*kubecontainer.Pod) bool {
 }

 func TestGetPods(t *testing.T) {
-    manager, fakeDocker := NewFakeDockerManager()
+    manager, fakeDocker := newTestDockerManager()
     dockerContainers := []docker.APIContainers{
         {
             ID: "1111",
@@ -195,7 +198,7 @@ func TestGetPods(t *testing.T) {
 }

 func TestListImages(t *testing.T) {
-    manager, fakeDocker := NewFakeDockerManager()
+    manager, fakeDocker := newTestDockerManager()
     dockerImages := []docker.APIImages{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
     expected := util.NewStringSet([]string{"1111", "2222", "3333"}...)
@@ -250,7 +253,7 @@ func dockerContainersToPod(containers DockerContainers) kubecontainer.Pod {
 }

 func TestKillContainerInPod(t *testing.T) {
-    manager, fakeDocker := NewFakeDockerManager()
+    manager, fakeDocker := newTestDockerManager()
     pod := &api.Pod{
         ObjectMeta: api.ObjectMeta{
@@ -296,7 +299,7 @@ func TestKillContainerInPod(t *testing.T) {
 }

 func TestKillContainerInPodWithError(t *testing.T) {
-    manager, fakeDocker := NewFakeDockerManager()
+    manager, fakeDocker := newTestDockerManager()
     pod := &api.Pod{
         ObjectMeta: api.ObjectMeta{
@@ -338,3 +341,281 @@ func TestKillContainerInPodWithError(t *testing.T) {
         t.Errorf("exepcted container entry ID '%v' to be found. states: %+v", containerToSpare.ID, ready)
     }
 }
type fakeExecProber struct {
result probe.Result
err error
}
func (p fakeExecProber) Probe(_ uexec.Cmd) (probe.Result, error) {
return p.result, p.err
}
func replaceProber(dm *DockerManager, result probe.Result, err error) {
fakeExec := fakeExecProber{
result: result,
err: err,
}
dm.prober = kubeprober.NewTestProber(fakeExec, dm.readinessManager, dm.containerRefManager, &record.FakeRecorder{})
return
}
// TestProbeContainer tests the functionality of probeContainer.
// Test cases are:
//
// No probe.
// Only LivenessProbe.
// Only ReadinessProbe.
// Both probes.
//
// Also, for each probe, there will be several cases covering whether the initial
// delay has passed, whether the probe handler will return Success, Failure,
// Unknown or error.
//
func TestProbeContainer(t *testing.T) {
manager, _ := newTestDockerManager()
dc := &docker.APIContainers{
ID: "foobar",
Created: time.Now().Unix(),
}
tests := []struct {
testContainer api.Container
expectError bool
expectedResult probe.Result
expectedReadiness bool
}{
// No probes.
{
testContainer: api.Container{},
expectedResult: probe.Success,
expectedReadiness: true,
},
// Only LivenessProbe.
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Failure,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectError: true,
expectedResult: probe.Unknown,
expectedReadiness: false,
},
// Only ReadinessProbe.
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectError: false,
expectedResult: probe.Success,
expectedReadiness: true,
},
// Both LivenessProbe and ReadinessProbe.
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Failure,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
}
for _, test := range tests {
if test.expectError {
replaceProber(manager, test.expectedResult, errors.New("error"))
} else {
replaceProber(manager, test.expectedResult, nil)
}
result, err := manager.prober.Probe(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
if test.expectError && err == nil {
t.Error("Expected error but did no error was returned.")
}
if !test.expectError && err != nil {
t.Errorf("Expected error but got: %v", err)
}
if test.expectedResult != result {
t.Errorf("Expected result was %v but probeContainer() returned %v", test.expectedResult, result)
}
if test.expectedReadiness != manager.readinessManager.GetReadiness(dc.ID) {
t.Errorf("Expected readiness was %v but probeContainer() set %v", test.expectedReadiness, manager.readinessManager.GetReadiness(dc.ID))
}
}
}


@@ -242,15 +242,11 @@ func NewMainKubelet(
         klet.networkPlugin = plug
     }

-    // TODO(vmarmol,yjhong): Use container runtime.
     // Initialize the runtime.
     switch containerRuntime {
     case "docker":
         // Only supported one for now, continue.
-    default:
-        return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime)
-    }
-    containerManager := dockertools.NewDockerManager(
+        klet.containerRuntime = dockertools.NewDockerManager(
         dockerClient,
         recorder,
         readinessManager,
@@ -261,20 +257,17 @@ func NewMainKubelet(
         containerLogsDir,
         osInterface,
         klet.networkPlugin,
-        nil,
         klet,
         klet.httpClient,
         newKubeletRuntimeHooks(recorder))
-    klet.runner = containerManager
-    klet.containerManager = containerManager
+    default:
+        return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime)
+    }
+    klet.runner = klet.containerRuntime

     klet.podManager = newBasicPodManager(klet.kubeClient)
-    klet.prober = prober.New(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder)
-    // TODO(vmarmol): Remove when the circular dependency is removed :(
-    containerManager.Prober = klet.prober

-    runtimeCache, err := kubecontainer.NewRuntimeCache(containerManager)
+    runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
     if err != nil {
         return nil, err
     }
@@ -352,9 +345,6 @@ type Kubelet struct {
     // Network plugin.
     networkPlugin network.NetworkPlugin

-    // Healthy check prober.
-    prober prober.Prober
-
     // Container readiness state manager.
     readinessManager *kubecontainer.ReadinessManager
@@ -386,8 +376,8 @@ type Kubelet struct {
     // Reference to this node.
     nodeRef *api.ObjectReference

-    // Manage containers.
-    containerManager *dockertools.DockerManager
+    // Container runtime.
+    containerRuntime kubecontainer.Runtime

     // nodeStatusUpdateFrequency specifies how often kubelet posts node status to master.
     // Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
@@ -876,7 +866,7 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string,
 // Kill all running containers in a pod (includes the pod infra container).
 func (kl *Kubelet) killPod(pod kubecontainer.Pod) error {
-    return kl.containerManager.KillPod(pod)
+    return kl.containerRuntime.KillPod(pod)
 }

 type empty struct{}
@@ -954,7 +944,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont
         return err
     }

-    err = kl.containerManager.SyncPod(pod, runningPod, podStatus)
+    err = kl.containerRuntime.SyncPod(pod, runningPod, podStatus)
     if err != nil {
         return err
     }
@@ -1146,7 +1136,7 @@ func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metri
         // in the cache. We need to bypass the cach to get the latest set of
         // running pods to clean up the volumes.
         // TODO: Evaluate the performance impact of bypassing the runtime cache.
-        runningPods, err = kl.containerManager.GetPods(false)
+        runningPods, err = kl.containerRuntime.GetPods(false)
         if err != nil {
             glog.Errorf("Error listing containers: %#v", err)
             return err
@@ -1343,10 +1333,10 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {

 // Returns the container runtime version for this Kubelet.
 func (kl *Kubelet) GetContainerRuntimeVersion() (kubecontainer.Version, error) {
-    if kl.containerManager == nil {
+    if kl.containerRuntime == nil {
         return nil, fmt.Errorf("no container runtime")
     }
-    return kl.containerManager.Version()
+    return kl.containerRuntime.Version()
 }

 func (kl *Kubelet) validatePodPhase(podStatus *api.PodStatus) error {
@@ -1386,7 +1376,7 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName, tail stri
         // waiting state.
         return err
     }
-    return kl.containerManager.GetContainerLogs(containerID, tail, follow, stdout, stderr)
+    return kl.containerRuntime.GetContainerLogs(containerID, tail, follow, stdout, stderr)
 }

 // GetHostname Returns the hostname as the kubelet sees it.
@@ -1656,7 +1646,7 @@ func (kl *Kubelet) generatePodStatus(pod *api.Pod) (api.PodStatus, error) {
     glog.V(3).Infof("Generating status for %q", podFullName)

     spec := &pod.Spec
-    podStatus, err := kl.containerManager.GetPodStatus(pod)
+    podStatus, err := kl.containerRuntime.GetPodStatus(pod)
     if err != nil {
         // Error handling
@@ -1706,7 +1696,7 @@ func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
 // It returns nil if not found.
 // TODO(yifan): Move this to runtime once the runtime interface has been all implemented.
 func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
-    pods, err := kl.containerManager.GetPods(false)
+    pods, err := kl.containerRuntime.GetPods(false)
     if err != nil {
         return nil, err
     }
@@ -1758,7 +1748,7 @@ func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16
         return fmt.Errorf("no runner specified.")
     }

-    pods, err := kl.containerManager.GetPods(false)
+    pods, err := kl.containerRuntime.GetPods(false)
     if err != nil {
         return err
     }


@@ -45,11 +45,8 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/metrics"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
-    uexec "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/version"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
     _ "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/host_path"
@@ -109,8 +106,9 @@ func newTestKubelet(t *testing.T) *TestKubelet {
     kubelet.podManager = podManager
     kubelet.containerRefManager = kubecontainer.NewRefManager()
     runtimeHooks := newKubeletRuntimeHooks(kubelet.recorder)
-    kubelet.containerManager = dockertools.NewDockerManager(fakeDocker, fakeRecorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, nil, kubelet, &fakeHTTP{}, runtimeHooks)
-    kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerManager)
+    kubelet.containerRuntime = dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, kubelet, &fakeHTTP{}, runtimeHooks)
+    kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerRuntime)
     kubelet.podWorkers = newPodWorkers(
         kubelet.runtimeCache,
         func(pod *api.Pod, mirrorPod *api.Pod, runningPod container.Pod) error {
@@ -119,9 +117,6 @@ func newTestKubelet(t *testing.T) *TestKubelet {
             return err
         },
         fakeRecorder)
-    kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{}
-    kubelet.prober = prober.New(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder)
-    kubelet.containerManager.Prober = kubelet.prober
     kubelet.volumeManager = newVolumeManager()
     return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient}
 }
@@ -504,7 +499,10 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
     kubelet := testKubelet.kubelet
     fakeDocker := testKubelet.fakeDocker
     waitGroup := testKubelet.waitGroup
-    kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
+    // TODO: Move this test to dockertools so that we don't have to do the hacky
+    // type assertion here.
+    dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+    dm.PodInfraContainerImage = "custom_image_name"
     fakeDocker.ContainerList = []docker.APIContainers{}
     pods := []*api.Pod{
         {
@@ -565,9 +563,12 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
     kubelet := testKubelet.kubelet
     fakeDocker := testKubelet.fakeDocker
     waitGroup := testKubelet.waitGroup
-    puller := kubelet.containerManager.Puller.(*dockertools.FakeDockerPuller)
+    // TODO: Move this test to dockertools so that we don't have to do the hacky
+    // type assertion here.
+    dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+    puller := dm.Puller.(*dockertools.FakeDockerPuller)
     puller.HasImages = []string{}
-    kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
+    dm.PodInfraContainerImage = "custom_image_name"
     fakeDocker.ContainerList = []docker.APIContainers{}
     pods := []*api.Pod{
         {
@@ -695,10 +696,10 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
     waitGroup := testKubelet.waitGroup
     fakeHttp := fakeHTTP{}

-    // Simulate HTTP failure. Re-create the containerManager to inject the failure.
+    // Simulate HTTP failure. Re-create the containerRuntime to inject the failure.
     kubelet.httpClient = &fakeHttp
     runtimeHooks := newKubeletRuntimeHooks(kubelet.recorder)
-    kubelet.containerManager = dockertools.NewDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, nil, kubelet, kubelet.httpClient, runtimeHooks)
+    kubelet.containerRuntime = dockertools.NewFakeDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, kubelet, kubelet.httpClient, runtimeHooks)

     pods := []*api.Pod{
         {
@@ -752,7 +753,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
         // Get pod status.
         "list", "inspect_container", "inspect_image",
         // Check the pod infra container.
-        "inspect_container", "inspect_image",
+        "inspect_container",
         // Create container.
         "create", "start",
         // Get pod status.
@@ -1645,12 +1646,12 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
     fakeDocker := testKubelet.fakeDocker
     waitGroup := testKubelet.waitGroup

-    // Simulate HTTP failure. Re-create the containerManager to inject the failure.
+    // Simulate HTTP failure. Re-create the containerRuntime to inject the failure.
     kubelet.httpClient = &fakeHTTP{
         err: fmt.Errorf("test error"),
     }
     runtimeHooks := newKubeletRuntimeHooks(kubelet.recorder)
-    kubelet.containerManager = dockertools.NewDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, nil, kubelet, kubelet.httpClient, runtimeHooks)
+    kubelet.containerRuntime = dockertools.NewFakeDockerManager(kubelet.dockerClient, kubelet.recorder, kubelet.readinessManager, kubelet.containerRefManager, dockertools.PodInfraContainerImage, 0, 0, "", kubelet.os, kubelet.networkPlugin, kubelet, kubelet.httpClient, runtimeHooks)

     pods := []*api.Pod{
         {
@@ -1704,7 +1705,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
         // Get pod status.
         "list", "inspect_container", "inspect_image",
         // Check the pod infra container.
-        "inspect_container", "inspect_image",
+        "inspect_container",
         // Create the container.
         "create", "start",
         // Kill the container since event handler fails.
@@ -1731,9 +1732,12 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
     kubelet := testKubelet.kubelet
     fakeDocker := testKubelet.fakeDocker
    waitGroup := testKubelet.waitGroup
-    puller := kubelet.containerManager.Puller.(*dockertools.FakeDockerPuller)
+    // TODO: Move this test to dockertools so that we don't have to do the hacky
+    // type assertion here.
+    dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+    puller := dm.Puller.(*dockertools.FakeDockerPuller)
     puller.HasImages = []string{"existing_one", "want:latest"}
-    kubelet.containerManager.PodInfraContainerImage = "custom_image_name"
+    dm.PodInfraContainerImage = "custom_image_name"
     fakeDocker.ContainerList = []docker.APIContainers{}

     pods := []*api.Pod{
@@ -2850,7 +2854,10 @@ func TestPortForward(t *testing.T) {
     podInfraContainerImage := "POD"
     infraContainerID := "infra"
-    kubelet.containerManager.PodInfraContainerImage = podInfraContainerImage
+    // TODO: Move this test to dockertools so that we don't have to do the hacky
+    // type assertion here.
+    dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+    dm.PodInfraContainerImage = podInfraContainerImage

     fakeDocker.ContainerList = []docker.APIContainers{
         {
@@ -3984,7 +3991,10 @@ func TestGetPodCreationFailureReason(t *testing.T) {
     pods := []*api.Pod{pod}
     kubelet.podManager.SetPods(pods)
     kubelet.volumeManager.SetVolumes(pod.UID, volumeMap{})
-    _, err := kubelet.containerManager.RunContainer(pod, &pod.Spec.Containers[0], "", "")
+    // TODO: Move this test to dockertools so that we don't have to do the hacky
+    // type assertion here.
+    dm := kubelet.containerRuntime.(*dockertools.DockerManager)
+    _, err := dm.RunContainer(pod, &pod.Spec.Containers[0], "", "")
     if err == nil {
         t.Errorf("expected error, found nil")
     }
@@ -4107,287 +4117,3 @@ func TestFilterOutTerminatedPods(t *testing.T) {
         t.Errorf("expected %#v, got %#v", expected, actual)
     }
 }
type fakeExecProber struct {
result probe.Result
err error
}
func (p fakeExecProber) Probe(_ uexec.Cmd) (probe.Result, error) {
return p.result, p.err
}
func makeTestKubelet(result probe.Result, err error) *Kubelet {
kl := &Kubelet{
readinessManager: kubecontainer.NewReadinessManager(),
containerRefManager: kubecontainer.NewRefManager(),
}
// TODO(vmarmol): Fix
fakeExec := fakeExecProber{
result: result,
err: err,
}
kl.prober = prober.NewTestProber(fakeExec, kl.readinessManager, kl.containerRefManager, &record.FakeRecorder{})
return kl
}
// TestProbeContainer tests the functionality of probeContainer.
// Test cases are:
//
// No probe.
// Only LivenessProbe.
// Only ReadinessProbe.
// Both probes.
//
// Also, for each probe, there will be several cases covering whether the initial
// delay has passed, whether the probe handler will return Success, Failure,
// Unknown or error.
//
func TestProbeContainer(t *testing.T) {
dc := &docker.APIContainers{
ID: "foobar",
Created: time.Now().Unix(),
}
tests := []struct {
testContainer api.Container
expectError bool
expectedResult probe.Result
expectedReadiness bool
}{
// No probes.
{
testContainer: api.Container{},
expectedResult: probe.Success,
expectedReadiness: true,
},
// Only LivenessProbe.
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Failure,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectError: true,
expectedResult: probe.Unknown,
expectedReadiness: false,
},
// Only ReadinessProbe.
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
{
testContainer: api.Container{
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectError: false,
expectedResult: probe.Success,
expectedReadiness: true,
},
// Both LivenessProbe and ReadinessProbe.
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: 100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Success,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: 100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{InitialDelaySeconds: -100},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Unknown,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{InitialDelaySeconds: -100},
},
expectedResult: probe.Failure,
expectedReadiness: false,
},
{
testContainer: api.Container{
LivenessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
ReadinessProbe: &api.Probe{
InitialDelaySeconds: -100,
Handler: api.Handler{
Exec: &api.ExecAction{},
},
},
},
expectedResult: probe.Success,
expectedReadiness: true,
},
}
for _, test := range tests {
var kl *Kubelet
if test.expectError {
kl = makeTestKubelet(test.expectedResult, errors.New("error"))
} else {
kl = makeTestKubelet(test.expectedResult, nil)
}
result, err := kl.prober.Probe(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created)
if test.expectError && err == nil {
t.Error("Expected error but did no error was returned.")
}
if !test.expectError && err != nil {
t.Errorf("Expected error but got: %v", err)
}
if test.expectedResult != result {
t.Errorf("Expected result was %v but probeContainer() returned %v", test.expectedResult, result)
}
if test.expectedReadiness != kl.readinessManager.GetReadiness(dc.ID) {
t.Errorf("Expected readiness was %v but probeContainer() set %v", test.expectedReadiness, kl.readinessManager.GetReadiness(dc.ID))
}
}
}


@@ -26,7 +26,6 @@ import (
     kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-    kubeletProber "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 )
@@ -43,7 +42,7 @@ func createPodWorkers() (*podWorkers, map[types.UID][]string) {
     fakeDocker := &dockertools.FakeDockerClient{}
     fakeRecorder := &record.FakeRecorder{}
     np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
-    dockerManager := dockertools.NewDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, &kubeletProber.FakeProber{}, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
+    dockerManager := dockertools.NewFakeDockerManager(fakeDocker, fakeRecorder, nil, nil, dockertools.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, newKubeletRuntimeHooks(fakeRecorder))
     fakeRuntimeCache := kubecontainer.NewFakeRuntimeCache(dockerManager)

     lock := sync.Mutex{}


@@ -88,7 +88,7 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {
     delay := retryDelay
     retry := 0
     for {
-        pods, err := kl.containerManager.GetPods(false)
+        pods, err := kl.containerRuntime.GetPods(false)
         if err != nil {
             return fmt.Errorf("failed to get kubelet pods: %v", err)
         }
@@ -120,7 +120,7 @@ func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error {

 // isPodRunning returns true if all containers of a manifest are running.
 func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) {
-    status, err := kl.containerManager.GetPodStatus(pod)
+    status, err := kl.containerRuntime.GetPodStatus(pod)
     if err != nil {
         glog.Infof("Failed to get the status of pod %q: %v", kubecontainer.GetPodFullName(pod), err)
         return false, err


@@ -28,7 +28,6 @@ import (
     kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
-    kubeletProber "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/prober"
     docker "github.com/fsouza/go-dockerclient"
     cadvisorApi "github.com/google/cadvisor/info/v1"
 )
@@ -149,7 +148,7 @@ func TestRunOnce(t *testing.T) {
         t: t,
     }
-    kb.containerManager = dockertools.NewDockerManager(
+    kb.containerRuntime = dockertools.NewFakeDockerManager(
     kb.dockerClient,
     kb.recorder,
     kb.readinessManager,
@@ -160,11 +159,9 @@ func TestRunOnce(t *testing.T) {
     "",
     kubecontainer.FakeOS{},
     kb.networkPlugin,
-    &kubeletProber.FakeProber{},
     kb,
     nil,
     newKubeletRuntimeHooks(kb.recorder))
-    kb.containerManager.Puller = &dockertools.FakeDockerPuller{}

     pods := []*api.Pod{
         {