Merge pull request #47316 from k82cn/k8s_47315

Automatic merge from submit-queue (batch tested with PRs 48981, 47316, 49180)

Added golint check for pkg/kubelet.

**What this PR does / why we need it**:
Added a golint check for `pkg/kubelet`, and made golint happy.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #47315 

**Release note**:
```release-note-none
```
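
The cleanups in the diff below follow a few recurring golint conventions rather than changing behavior: doc comments on exported identifiers that begin with the identifier's name, upper-case initialisms (`ID`, `API`, `UID`) in names, error strings without leading capitals or trailing punctuation, and dropping an `else` after a terminating `return`. As a rough, self-contained sketch of what golint-clean code along those lines looks like (hypothetical identifiers, not taken from `pkg/kubelet`):

```go
// Package example is a hypothetical illustration of the golint conventions
// applied in this PR; none of these identifiers come from pkg/kubelet.
package example

import "errors"

// Dependencies is exported, so golint wants a doc comment that begins with
// the type name. Initialisms such as UID, API, and ID stay upper-case in
// identifiers (podUID, registerWithAPIServer, and so on).
type Dependencies struct {
	PodUID string
}

// Validate returns an error when PodUID is unset. Golint also wants error
// strings to start lower-case and to carry no trailing period.
func (d *Dependencies) Validate() error {
	if d.PodUID == "" {
		return errors.New("pod UID must not be empty")
	}
	return nil
}
```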
Kubernetes Submit Queue 2017-07-19 11:21:25 -07:00 committed by GitHub
commit c0287ce420
26 changed files with 170 additions and 162 deletions


@@ -126,9 +126,9 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
 return cmd
 }
-// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
+// UnsecuredDependencies returns a Dependencies suitable for being run, or an error if the server setup
 // is not valid. It will not start any background processes, and does not include authentication/authorization
-func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {
+func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, error) {
 // Initialize the TLS Options
 tlsOptions, err := InitializeTLS(&s.KubeletFlags, &s.KubeletConfiguration)
 if err != nil {
@@ -151,7 +151,7 @@ func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error
 dockerClient = nil
 }
-return &kubelet.KubeletDeps{
+return &kubelet.Dependencies{
 Auth: nil, // default does not enforce auth[nz]
 CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
 Cloud: nil, // cloud provider might start background processes
@@ -182,7 +182,7 @@ func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) {
 }
 // Tries to download the kubelet-<node-name> configmap from "kube-system" namespace via the API server and returns a JSON string or error
-func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (string, error) {
+func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (string, error) {
 // TODO(mtaufen): should probably cache clientset and pass into this function rather than regenerate on every request
 kubeClient, err := getKubeClient(s)
 if err != nil {
@@ -281,11 +281,11 @@ func initKubeletConfigSync(s *options.KubeletServer) (*componentconfig.KubeletCo
 }
 }
-// Run runs the specified KubeletServer with the given KubeletDeps. This should never exit.
+// Run runs the specified KubeletServer with the given Dependencies. This should never exit.
 // The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer.
-// Otherwise, the caller is assumed to have set up the KubeletDeps object and a default one will
+// Otherwise, the caller is assumed to have set up the Dependencies object and a default one will
 // not be generated.
-func Run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) error {
+func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) error {
 if err := run(s, kubeDeps); err != nil {
 return fmt.Errorf("failed to run Kubelet: %v", err)
 }
@@ -339,7 +339,7 @@ func validateConfig(s *options.KubeletServer) error {
 }
 // makeEventRecorder sets up kubeDeps.Recorder if its nil. Its a no-op otherwise.
-func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, nodeName types.NodeName) {
+func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, nodeName types.NodeName) {
 if kubeDeps.Recorder != nil {
 return
 }
@@ -354,7 +354,7 @@ func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubele
 }
 }
-func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
+func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 // TODO: this should be replaced by a --standalone flag
 standaloneMode := (len(s.APIServerList) == 0 && !s.RequireKubeConfig)
@@ -417,7 +417,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
 }
 if kubeDeps == nil {
-kubeDeps, err = UnsecuredKubeletDeps(s)
+kubeDeps, err = UnsecuredDependencies(s)
 if err != nil {
 return err
 }
@@ -817,7 +817,7 @@ func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config)
 // 2 Kubelet binary
 // 3 Standalone 'kubernetes' binary
 // Eventually, #2 will be replaced with instances of #3
-func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, runOnce bool, standaloneMode bool) error {
+func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, runOnce bool, standaloneMode bool) error {
 hostname := nodeutil.GetHostname(kubeFlags.HostnameOverride)
 // Query the cloud provider for our node name, default to hostname if kcfg.Cloud == nil
 nodeName, err := getNodeName(kubeDeps.Cloud, hostname)
@@ -891,7 +891,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.Kubele
 return nil
 }
-func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps) {
+func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies) {
 // start the kubelet
 go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)
@@ -908,7 +908,7 @@ func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg
 }
 }
-func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.KubeletBootstrap, err error) {
+func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.Bootstrap, err error) {
 // TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
 // up into "per source" synchronizations
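
Most of the churn in this first file is a mechanical rename driven by golint's name-stuttering check: `kubelet.KubeletDeps` and `kubelet.KubeletBootstrap` repeat the package name, so they become `kubelet.Dependencies` and `kubelet.Bootstrap`. A hedged sketch of how a caller outside the package reads after the rename (the helper below is illustrative, not part of this PR):

```go
package app

import "k8s.io/kubernetes/pkg/kubelet"

// newEmptyDeps is a hypothetical helper showing the renamed type in use;
// before this PR the same value would have been a *kubelet.KubeletDeps,
// which golint flags because "kubelet" appears twice at the call site.
func newEmptyDeps() *kubelet.Dependencies {
	return &kubelet.Dependencies{}
}
```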


@@ -38,6 +38,7 @@ type diskSpaceManager interface {
 IsRuntimeDiskSpaceAvailable() (bool, error)
 }
+// DiskSpacePolicy defines the free disk for Docker and Root.
 type DiskSpacePolicy struct {
 // free disk space threshold for filesystem holding docker images.
 DockerFreeDiskMB int
@@ -112,10 +113,10 @@ func (dm *realDiskSpaceManager) isSpaceAvailable(fsType string, threshold int, f
 func validatePolicy(policy DiskSpacePolicy) error {
 if policy.DockerFreeDiskMB < 0 {
-return fmt.Errorf("free disk space should be non-negative. Invalid value %d for docker disk space threshold.", policy.DockerFreeDiskMB)
+return fmt.Errorf("free disk space should be non-negative; invalid value %d for docker disk space threshold", policy.DockerFreeDiskMB)
 }
 if policy.RootFreeDiskMB < 0 {
-return fmt.Errorf("free disk space should be non-negative. Invalid value %d for root disk space threshold.", policy.RootFreeDiskMB)
+return fmt.Errorf("free disk space should be non-negative; invalid value %d for root disk space threshold", policy.RootFreeDiskMB)
 }
 return nil
 }


@@ -116,10 +116,10 @@ const (
 // nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
 nodeStatusUpdateRetry = 5
-// Location of container logs.
+// ContainerLogsDir is the location of container logs.
 ContainerLogsDir = "/var/log/containers"
-// max backoff period, exported for the e2e test
+// MaxContainerBackOff is the max backoff period, exported for the e2e test
 MaxContainerBackOff = 300 * time.Second
 // Capacity of the channel for storing pods to kill. A small number should
@@ -156,9 +156,9 @@ const (
 // container restarts and image pulls.
 backOffPeriod = time.Second * 10
-// Period for performing container garbage collection.
+// ContainerGCPeriod is the period for performing container garbage collection.
 ContainerGCPeriod = time.Minute
-// Period for performing image garbage collection.
+// ImageGCPeriod is the period for performing image garbage collection.
 ImageGCPeriod = 5 * time.Minute
 // Minimum number of dead containers to keep in a pod
@@ -178,8 +178,8 @@ type SyncHandler interface {
 // Option is a functional option type for Kubelet
 type Option func(*Kubelet)
-// bootstrapping interface for kubelet, targets the initialization protocol
-type KubeletBootstrap interface {
+// Bootstrap is a bootstrapping interface for kubelet, targets the initialization protocol
+type Bootstrap interface {
 GetConfiguration() componentconfig.KubeletConfiguration
 BirthCry()
 StartGarbageCollection()
@@ -189,13 +189,13 @@ type KubeletBootstrap interface {
 RunOnce(<-chan kubetypes.PodUpdate) ([]RunPodResult, error)
 }
-// create and initialize a Kubelet instance
-type KubeletBuilder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (KubeletBootstrap, error)
-// KubeletDeps is a bin for things we might consider "injected dependencies" -- objects constructed
+// Builder creates and initializes a Kubelet instance
+type Builder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (Bootstrap, error)
+// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
 // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
 // these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
-type KubeletDeps struct {
+type Dependencies struct {
 // TODO(mtaufen): KubeletBuilder:
 // Mesos currently uses this as a hook to let them make their own call to
 // let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with
@@ -203,7 +203,7 @@ type KubeletDeps struct {
 // a nice home for it would be. There seems to be a trend, between this and
 // the Options fields below, of providing hooks where you can add extra functionality
 // to the Kubelet for your solution. Maybe we should centralize these sorts of things?
-Builder KubeletBuilder
+Builder Builder
 // TODO(mtaufen): ContainerRuntimeOptions and Options:
 // Arrays of functions that can do arbitrary things to the Kubelet and the Runtime
@@ -240,7 +240,7 @@ type KubeletDeps struct {
 // makePodSourceConfig creates a config.PodConfig from the given
 // KubeletConfiguration or returns an error.
-func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName types.NodeName) (*config.PodConfig, error) {
+func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) {
 manifestURLHeader := make(http.Header)
 if kubeCfg.ManifestURLHeader != "" {
 pieces := strings.Split(kubeCfg.ManifestURLHeader, ":")
@@ -285,7 +285,7 @@ func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (i
 // NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
 // No initialization of Kubelet and its modules should happen here.
-func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) {
+func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) {
 if kubeCfg.RootDirectory == "" {
 return nil, fmt.Errorf("invalid root directory %q", kubeCfg.RootDirectory)
 }
@@ -514,11 +514,11 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 glog.Warningf("Failed to close iptables lock file: %v", err)
 }
-if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU)); err != nil {
+plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU))
+if err != nil {
 return nil, err
-} else {
-klet.networkPlugin = plug
 }
+klet.networkPlugin = plug
 machineInfo, err := klet.GetCachedMachineInfo()
 if err != nil {
@@ -698,14 +698,15 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 var ips []net.IP
 cfgAddress := net.ParseIP(kubeCfg.Address)
 if cfgAddress == nil || cfgAddress.IsUnspecified() {
-if localIPs, err := allLocalIPsWithoutLoopback(); err != nil {
+localIPs, err := allLocalIPsWithoutLoopback()
+if err != nil {
 return nil, err
-} else {
-ips = localIPs
 }
+ips = localIPs
 } else {
 ips = []net.IP{cfgAddress}
 }
 ips = append(ips, cloudIPs...)
 names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...)
 klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names)
@@ -1118,7 +1119,7 @@ func allLocalIPsWithoutLoopback() ([]net.IP, error) {
 for _, i := range interfaces {
 addresses, err := i.Addrs()
 if err != nil {
-return nil, fmt.Errorf("could not list the addresses for network interface %v: %v\n", i, err)
+return nil, fmt.Errorf("could not list the addresses for network interface %v: %v", i, err)
 }
 for _, address := range addresses {
 switch v := address.(type) {
@@ -1150,7 +1151,7 @@ func (kl *Kubelet) setupDataDirs() error {
 return nil
 }
-// Starts garbage collection threads.
+// StartGarbageCollection starts garbage collection threads.
 func (kl *Kubelet) StartGarbageCollection() {
 loggedContainerGCFailure := false
 go wait.Until(func() {
@@ -2128,10 +2129,10 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
 }
 // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
-func (kl *Kubelet) cleanUpContainersInPod(podId types.UID, exitedContainerID string) {
-if podStatus, err := kl.podCache.Get(podId); err == nil {
+func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) {
+if podStatus, err := kl.podCache.Get(podID); err == nil {
 removeAll := false
-if syncedPod, ok := kl.podManager.GetPodByUID(podId); ok {
+if syncedPod, ok := kl.podManager.GetPodByUID(podID); ok {
 // When an evicted pod has already synced, all containers can be removed.
 removeAll = eviction.PodIsEvicted(syncedPod.Status)
 }
@@ -2146,7 +2147,7 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
 }
 // Gets the streaming server configuration to use with in-process CRI shims.
-func getStreamingConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps) *streaming.Config {
+func getStreamingConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies) *streaming.Config {
 config := &streaming.Config{
 // Use a relative redirect (no scheme or host).
 BaseURL: &url.URL{


@@ -61,19 +61,19 @@ func (kl *Kubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
 return kl.cadvisor.RootFsInfo()
 }
-// Returns stats (from Cadvisor) for a non-Kubernetes container.
+// GetRawContainerInfo returns stats (from Cadvisor) for a non-Kubernetes container.
 func (kl *Kubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
 if subcontainers {
 return kl.cadvisor.SubcontainerInfo(containerName, req)
-} else {
-containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
-if err != nil {
-return nil, err
-}
-return map[string]*cadvisorapi.ContainerInfo{
-containerInfo.Name: containerInfo,
-}, nil
 }
+containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
+if err != nil {
+return nil, err
+}
+return map[string]*cadvisorapi.ContainerInfo{
+containerInfo.Name: containerInfo,
+}, nil
 }
 // GetVersionInfo returns information about the version of cAdvisor in use.
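
Several hunks, including the `GetRawContainerInfo` change above, apply golint's suggestion to drop an `else` whose `if` branch ends in a `return` and outdent the remaining code. A minimal standalone sketch of the resulting style (illustrative names only, not kubelet code):

```go
package example

import "errors"

// lookup returns the value stored under key, or an error when it is absent.
// Because the if branch returns, no else is needed around the happy path.
func lookup(m map[string]string, key string) (string, error) {
	v, ok := m[key]
	if !ok {
		return "", errors.New("key not found")
	}
	return v, nil
}
```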


@@ -30,7 +30,7 @@ import (
 )
 func TestGetContainerInfo(t *testing.T) {
-cadvisorApiFailure := fmt.Errorf("cAdvisor failure")
+cadvisorAPIFailure := fmt.Errorf("cAdvisor failure")
 runtimeError := fmt.Errorf("List containers error")
 tests := []struct {
 name string
@@ -40,7 +40,7 @@ func TestGetContainerInfo(t *testing.T) {
 runtimeError error
 podList []*kubecontainertest.FakePod
 requestedPodFullName string
-requestedPodUid types.UID
+requestedPodUID types.UID
 requestedContainerName string
 expectDockerContainerCall bool
 mockError error
@@ -73,7 +73,7 @@ func TestGetContainerInfo(t *testing.T) {
 },
 },
 requestedPodFullName: "qux_ns",
-requestedPodUid: "",
+requestedPodUID: "",
 requestedContainerName: "foo",
 expectDockerContainerCall: true,
 mockError: nil,
@@ -102,11 +102,11 @@ func TestGetContainerInfo(t *testing.T) {
 },
 },
 requestedPodFullName: "qux_ns",
-requestedPodUid: "uuid",
+requestedPodUID: "uuid",
 requestedContainerName: "foo",
 expectDockerContainerCall: true,
-mockError: cadvisorApiFailure,
-expectedError: cadvisorApiFailure,
+mockError: cadvisorAPIFailure,
+expectedError: cadvisorAPIFailure,
 expectStats: false,
 },
 {
@@ -117,7 +117,7 @@ func TestGetContainerInfo(t *testing.T) {
 runtimeError: nil,
 podList: []*kubecontainertest.FakePod{},
 requestedPodFullName: "qux",
-requestedPodUid: "",
+requestedPodUID: "",
 requestedContainerName: "foo",
 expectDockerContainerCall: false,
 mockError: nil,
@@ -132,7 +132,7 @@ func TestGetContainerInfo(t *testing.T) {
 runtimeError: runtimeError,
 podList: []*kubecontainertest.FakePod{},
 requestedPodFullName: "qux",
-requestedPodUid: "",
+requestedPodUID: "",
 requestedContainerName: "foo",
 mockError: nil,
 expectedError: runtimeError,
@@ -146,7 +146,7 @@ func TestGetContainerInfo(t *testing.T) {
 runtimeError: nil,
 podList: []*kubecontainertest.FakePod{},
 requestedPodFullName: "qux_ns",
-requestedPodUid: "",
+requestedPodUID: "",
 requestedContainerName: "foo",
 mockError: nil,
 expectedError: kubecontainer.ErrContainerNotFound,
@@ -174,7 +174,7 @@ func TestGetContainerInfo(t *testing.T) {
 },
 },
 requestedPodFullName: "qux_ns",
-requestedPodUid: "",
+requestedPodUID: "",
 requestedContainerName: "foo",
 mockError: nil,
 expectedError: kubecontainer.ErrContainerNotFound,
@@ -195,7 +195,7 @@ func TestGetContainerInfo(t *testing.T) {
 fakeRuntime.Err = tc.runtimeError
 fakeRuntime.PodList = tc.podList
-stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUid, tc.requestedContainerName, cadvisorReq)
+stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq)
 assert.Equal(t, tc.expectedError, err)
 if tc.expectStats {


@@ -214,7 +214,7 @@ func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
 return kl.containerManager.GetNodeConfig()
 }
-// Returns host IP or nil in case of error.
+// GetHostIP returns host IP or nil in case of error.
 func (kl *Kubelet) GetHostIP() (net.IP, error) {
 node, err := kl.GetNode()
 if err != nil {


@@ -31,17 +31,17 @@ import (
 )
 const (
-// the mark-for-masquerade chain
+// KubeMarkMasqChain is the mark-for-masquerade chain
 // TODO: clean up this logic in kube-proxy
 KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"
-// the mark-for-drop chain
+// KubeMarkDropChain is the mark-for-drop chain
 KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"
-// kubernetes postrouting rules
+// KubePostroutingChain is kubernetes postrouting rules
 KubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"
-// kubernetes firewall rules
+// KubeFirewallChain is kubernetes firewall rules
 KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
 )


@@ -118,7 +118,7 @@ func TestComposeDNSSearch(t *testing.T) {
 recorder := record.NewFakeRecorder(20)
 kubelet.recorder = recorder
-pod := podWithUidNameNs("", "test_pod", "testNS")
+pod := podWithUIDNameNs("", "test_pod", "testNS")
 kubelet.clusterDomain = "TEST"
 testCases := []struct {


@@ -58,10 +58,10 @@ const (
 maxNamesPerImageInNodeStatus = 5
 )
-// registerWithApiServer registers the node with the cluster master. It is safe
+// registerWithAPIServer registers the node with the cluster master. It is safe
 // to call multiple times, but not concurrently (kl.registrationCompleted is
 // not locked).
-func (kl *Kubelet) registerWithApiServer() {
+func (kl *Kubelet) registerWithAPIServer() {
 if kl.registrationCompleted {
 return
 }
@@ -81,7 +81,7 @@ func (kl *Kubelet) registerWithApiServer() {
 }
 glog.Infof("Attempting to register node %s", node.Name)
-registered := kl.tryRegisterWithApiServer(node)
+registered := kl.tryRegisterWithAPIServer(node)
 if registered {
 glog.Infof("Successfully registered node %s", node.Name)
 kl.registrationCompleted = true
@@ -90,14 +90,14 @@ func (kl *Kubelet) registerWithApiServer() {
 }
 }
-// tryRegisterWithApiServer makes an attempt to register the given node with
+// tryRegisterWithAPIServer makes an attempt to register the given node with
 // the API server, returning a boolean indicating whether the attempt was
 // successful. If a node with the same name already exists, it reconciles the
 // value of the annotation for controller-managed attach-detach of attachable
 // persistent volumes for the node. If a node of the same name exists but has
 // a different externalID value, it attempts to delete that node so that a
 // later attempt can recreate it.
-func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
+func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
 _, err := kl.kubeClient.Core().Nodes().Create(node)
 if err == nil {
 return true
@@ -344,7 +344,7 @@ func (kl *Kubelet) syncNodeStatus() {
 }
 if kl.registerNode {
 // This will exit immediately if it doesn't need to do anything.
-kl.registerWithApiServer()
+kl.registerWithAPIServer()
 }
 if err := kl.updateNodeStatus(); err != nil {
 glog.Errorf("Unable to update node status: %v", err)
@@ -499,11 +499,10 @@ func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
 if ipAddr == nil {
 // We tried everything we could, but the IP address wasn't fetchable; error out
 return fmt.Errorf("can't get ip address of node %s. error: %v", node.Name, err)
-} else {
+}
 node.Status.Addresses = []v1.NodeAddress{
 {Type: v1.NodeInternalIP, Address: ipAddr.String()},
 {Type: v1.NodeHostName, Address: kl.GetHostname()},
-}
 }
 }
 return nil


@@ -954,7 +954,7 @@ func TestRegisterWithApiServer(t *testing.T) {
 done := make(chan struct{})
 go func() {
-kubelet.registerWithApiServer()
+kubelet.registerWithAPIServer()
 done <- struct{}{}
 }()
 select {
@@ -1113,7 +1113,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 return notImplemented(action)
 })
-result := kubelet.tryRegisterWithApiServer(tc.newNode)
+result := kubelet.tryRegisterWithAPIServer(tc.newNode)
 require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
 actions := kubeClient.Actions()


@@ -381,7 +381,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
 }
 services, err := kl.serviceLister.List(labels.Everything())
 if err != nil {
-return m, fmt.Errorf("failed to list services when setting up env vars.")
+return m, fmt.Errorf("failed to list services when setting up env vars")
 }
 // project the services in namespace ns onto the master services
@@ -546,7 +546,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 return result, err
 }
 case envVar.ValueFrom.ResourceFieldRef != nil:
-defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
+defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardAPI(pod, container)
 if err != nil {
 return result, err
 }
@@ -669,9 +669,8 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co
 containerName := fs.ContainerName
 if len(containerName) == 0 {
 return resource.ExtractContainerResourceValue(fs, container)
-} else {
-return resource.ExtractResourceValueByContainerName(fs, pod, containerName)
 }
+return resource.ExtractResourceValueByContainerName(fs, pod, containerName)
 }
 // One of the following arguments must be non-nil: runningPod, status.
@@ -1353,7 +1352,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 continue
 }
 status := statuses[container.Name]
-reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
+reason, ok := kl.reasonCache.Get(pod.UID, container.Name)
 if !ok {
 // In fact, we could also apply Waiting state here, but it is less informative,
 // and the container will be restarted soon, so we prefer the original state here.
@@ -1368,8 +1367,8 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 }
 status.State = v1.ContainerState{
 Waiting: &v1.ContainerStateWaiting{
-Reason: reason.Error(),
-Message: message,
+Reason: reason.Err.Error(),
+Message: reason.Message,
 },
 }
 statuses[container.Name] = status
@@ -1390,7 +1389,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 return containerStatuses
 }
-// Returns logs of current machine.
+// ServeLogs returns logs of current machine.
 func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
 // TODO: whitelist logs we are willing to serve
 kl.logServer.ServeHTTP(w, req)
@@ -1408,7 +1407,7 @@ func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, container
 return pod.FindContainerByName(containerName), nil
 }
-// Run a command in a container, returns the combined stdout, stderr as an array of bytes
+// RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes
 func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
 container, err := kl.findContainer(podFullName, podUID, containerName)
 if err != nil {


@@ -1723,7 +1723,7 @@ func TestExec(t *testing.T) {
 tty = true
 )
 var (
-podFullName = kubecontainer.GetPodFullName(podWithUidNameNs(podUID, podName, podNamespace))
+podFullName = kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, podName, podNamespace))
 command = []string{"ls"}
 stdin = &bytes.Buffer{}
 stdout = &fakeReadWriteCloser{}
@@ -1859,7 +1859,7 @@ func TestPortForward(t *testing.T) {
 }},
 }
-podFullName := kubecontainer.GetPodFullName(podWithUidNameNs(podUID, tc.podName, podNamespace))
+podFullName := kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, tc.podName, podNamespace))
 { // No streaming case
 description := "no streaming - " + tc.description
 redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{})


@@ -26,14 +26,14 @@ import (
 "k8s.io/kubernetes/pkg/api/v1/resource"
 )
-// defaultPodLimitsForDownwardApi copies the input pod, and optional container,
+// defaultPodLimitsForDownwardAPI copies the input pod, and optional container,
 // and applies default resource limits. it returns a copy of the input pod,
 // and a copy of the input container (if specified) with default limits
 // applied. if a container has no limit specified, it will default the limit to
 // the node allocatable.
 // TODO: if/when we have pod level resources, we need to update this function
 // to use those limits instead of node allocatable.
-func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
+func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
 if pod == nil {
 return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
 }


@@ -79,7 +79,7 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
 }
 as := assert.New(t)
 for idx, tc := range cases {
-actual, _, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod, nil)
+actual, _, err := tk.kubelet.defaultPodLimitsForDownwardAPI(tc.pod, nil)
 as.Nil(err, "failed to default pod limits: %v", err)
 if !apiequality.Semantic.DeepEqual(tc.expected, actual) {
 as.Fail("test case [%d] failed. Expected: %+v, Got: %+v", idx, tc.expected, actual)


@@ -356,7 +356,7 @@ func TestSyncPodsStartPod(t *testing.T) {
 kubelet := testKubelet.kubelet
 fakeRuntime := testKubelet.fakeRuntime
 pods := []*v1.Pod{
-podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "bar"},
 },
@@ -443,8 +443,8 @@ func TestHandlePortConflicts(t *testing.T) {
 spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
 pods := []*v1.Pod{
-podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
-podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
+podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
+podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
 }
 // Make sure the Pods are in the reverse order of creation time.
 pods[1].CreationTimestamp = metav1.NewTime(time.Now())
@@ -488,8 +488,8 @@ func TestHandleHostNameConflicts(t *testing.T) {
 // default NodeName in test is 127.0.0.1
 pods := []*v1.Pod{
-podWithUidNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
-podWithUidNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
+podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
+podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
 }
 notfittingPod := pods[0]
@@ -528,8 +528,8 @@ func TestHandleNodeSelector(t *testing.T) {
 testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 pods := []*v1.Pod{
-podWithUidNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
-podWithUidNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
+podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
+podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
 }
 // The first pod should be rejected.
 notfittingPod := pods[0]
@@ -574,8 +574,8 @@ func TestHandleMemExceeded(t *testing.T) {
 }}},
 }
 pods := []*v1.Pod{
-podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
-podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
+podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
+podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
 }
 // Make sure the Pods are in the reverse order of creation time.
 pods[1].CreationTimestamp = metav1.NewTime(time.Now())
@@ -768,7 +768,7 @@ func TestCreateMirrorPod(t *testing.T) {
 kl := testKubelet.kubelet
 manager := testKubelet.fakeMirrorClient
-pod := podWithUidNameNs("12345678", "bar", "foo")
+pod := podWithUIDNameNs("12345678", "bar", "foo")
 pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
 pods := []*v1.Pod{pod}
 kl.podManager.SetPods(pods)
@@ -795,7 +795,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
 kl := testKubelet.kubelet
 manager := testKubelet.fakeMirrorClient
-pod := podWithUidNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "1234", Image: "foo"},
 },
@@ -803,7 +803,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
 pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
 // Mirror pod has an outdated spec.
-mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
+mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "1234", Image: "bar"},
 },
@@ -966,7 +966,7 @@ func TestHostNetworkAllowed(t *testing.T) {
 HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
 },
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo"},
 },
@@ -999,7 +999,7 @@ func TestHostNetworkDisallowed(t *testing.T) {
 HostNetworkSources: []string{},
 },
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo"},
 },
@@ -1031,7 +1031,7 @@ func TestHostPIDAllowed(t *testing.T) {
 HostPIDSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
 },
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo"},
 },
@@ -1064,7 +1064,7 @@ func TestHostPIDDisallowed(t *testing.T) {
 HostPIDSources: []string{},
 },
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo"},
 },
@@ -1096,7 +1096,7 @@ func TestHostIPCAllowed(t *testing.T) {
 HostIPCSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
 },
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo"},
 },
@@ -1129,7 +1129,7 @@ func TestHostIPCDisallowed(t *testing.T) {
 HostIPCSources: []string{},
 },
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo"},
 },
@@ -1160,7 +1160,7 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
 AllowPrivileged: true,
 })
 privileged := true
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo", SecurityContext: &v1.SecurityContext{Privileged: &privileged}},
 },
@@ -1188,7 +1188,7 @@ func TestPrivilegedContainerDisallowed(t *testing.T) {
 AllowPrivileged: false,
 })
 privileged := true
-pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 Containers: []v1.Container{
 {Name: "foo", SecurityContext: &v1.SecurityContext{Privileged: &privileged}},
 },
@@ -1218,7 +1218,7 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
 },
 })
-pod := podWithUidNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
 HostNetwork: false,
 Containers: []v1.Container{
@@ -1372,7 +1372,7 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
 assert.NotEqual(t, v1.PodFailed, status.Phase)
 }
-func podWithUidNameNs(uid types.UID, name, namespace string) *v1.Pod {
+func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
 return &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 UID: uid,
@@ -1383,8 +1383,8 @@ func podWithUidNameNs(uid types.UID, name, namespace string) *v1.Pod {
 }
 }
-func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
-pod := podWithUidNameNs(uid, name, namespace)
+func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
+pod := podWithUIDNameNs(uid, name, namespace)
 pod.Spec = spec
 return pod
 }
@@ -1399,8 +1399,8 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
 testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 kl := testKubelet.kubelet
 pods := []*v1.Pod{
-podWithUidNameNs("12345678", "pod1", "ns"),
-podWithUidNameNs("12345679", "pod2", "ns"),
+podWithUIDNameNs("12345678", "pod1", "ns"),
+podWithUIDNameNs("12345679", "pod2", "ns"),
 }
 kl.podManager.SetPods(pods)
@@ -1439,9 +1439,9 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 kl := testKubelet.kubelet
 pods := []*v1.Pod{
-podWithUidNameNs("12345678", "pod1", "ns"),
-podWithUidNameNs("12345679", "pod2", "ns"),
-podWithUidNameNs("12345680", "pod3", "ns"),
+podWithUIDNameNs("12345678", "pod1", "ns"),
+podWithUIDNameNs("12345679", "pod2", "ns"),
+podWithUIDNameNs("12345680", "pod3", "ns"),
 }
 syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
@@ -1465,7 +1465,7 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
 Name: "pod1",
 Namespace: "ns",
 }
-apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
+apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
 // Sync once to create pod directory; confirm that the pod directory has
 // already been created.
@@ -1544,7 +1544,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 }
 specContainerList = append(specContainerList, v1.Container{Name: containerName})
 }
-pod := podWithUidNameNs("uid1", "foo", "test")
+pod := podWithUIDNameNs("uid1", "foo", "test")
 pod.Spec = v1.PodSpec{
 Containers: specContainerList,
 }
@@ -1587,7 +1587,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 kubelet := testKubelet.kubelet
-pod := podWithUidNameNs("12345678", "foo", "new")
+pod := podWithUIDNameNs("12345678", "foo", "new")
 pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
 podStatus := &kubecontainer.PodStatus{
@@ -1777,7 +1777,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
 testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 kubelet := testKubelet.kubelet
-pod := podWithUidNameNs("12345678", "foo", "new")
+pod := podWithUIDNameNs("12345678", "foo", "new")
 containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
 podStatus := &kubecontainer.PodStatus{
 ID: pod.UID,


@@ -113,7 +113,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
 continue
 }
 if len(volumePaths) > 0 {
-orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk.", uid))
+orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
 continue
 }
 glog.V(3).Infof("Orphaned pod %q found, removing", uid)


@@ -36,7 +36,7 @@ func TestListVolumesForPod(t *testing.T) {
 defer testKubelet.Cleanup()
 kubelet := testKubelet.kubelet
-pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 Volumes: []v1.Volume{
 {
 Name: "vol1",
@@ -163,7 +163,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
 defer testKubelet.Cleanup()
 kubelet := testKubelet.kubelet
-pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 Volumes: []v1.Volume{
 {
 Name: "vol1",
@@ -209,7 +209,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
 defer testKubelet.Cleanup()
 kubelet := testKubelet.kubelet
-pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 Volumes: []v1.Volume{
 {
 Name: "vol1",
@@ -298,7 +298,7 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
 return true, nil, fmt.Errorf("no reaction implemented for %s", action)
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 Volumes: []v1.Volume{
 {
 Name: "vol1",
@@ -367,7 +367,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
 return true, nil, fmt.Errorf("no reaction implemented for %s", action)
 })
-pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 Volumes: []v1.Volume{
 {
 Name: "vol1",


@ -69,23 +69,27 @@ func (c *criNetworkHost) GetNetNS(containerID string) (string, error) {
return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID}) return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
} }
// noOpLegacyHost implements the network.LegacyHost interface for the remote // NoOpLegacyHost implements the network.LegacyHost interface for the remote
// runtime shim by just returning empties. It doesn't support legacy features // runtime shim by just returning empties. It doesn't support legacy features
// like host port and bandwidth shaping. // like host port and bandwidth shaping.
type NoOpLegacyHost struct{} type NoOpLegacyHost struct{}
// GetPodByName always returns "nil, true" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) { func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return nil, true return nil, true
} }
// GetKubeClient always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface { func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface {
return nil return nil
} }
// GetRuntime always returns "nil" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime { func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime {
return nil return nil
} }
func (nh *NoOpLegacyHost) SupportsLegacyFeatures() bool { // SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost'
func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool {
return false return false
} }

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/cadvisor"
) )
// OOMWatcher defines the interface of OOM watchers.
type OOMWatcher interface { type OOMWatcher interface {
Start(ref *v1.ObjectReference) error Start(ref *v1.ObjectReference) error
} }
@ -36,6 +37,7 @@ type realOOMWatcher struct {
recorder record.EventRecorder recorder record.EventRecorder
} }
// NewOOMWatcher creates and initializes an OOMWatcher based on parameters.
func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) OOMWatcher { func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) OOMWatcher {
return &realOOMWatcher{ return &realOOMWatcher{
cadvisor: cadvisor, cadvisor: cadvisor,
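The OOMWatcher hunk only adds the doc comments golint expects on an exported interface and its constructor. As a sketch of how such a watcher is typically consumed, with simplified types standing in for cadvisor.Interface and record.EventRecorder (everything below is illustrative, not the real implementation):

```go
package main

import "fmt"

// oomWatcher is a reduced version of the interface documented above: Start
// begins watching for OOM events attributed to the given object reference.
type oomWatcher interface {
	Start(ref string) error
}

// fakeWatcher is a stand-in; the real watcher streams events from cAdvisor
// and records them against the node via an event recorder.
type fakeWatcher struct{}

func (w *fakeWatcher) Start(ref string) error {
	fmt.Printf("watching OOM events for %s\n", ref)
	return nil
}

// newOOMWatcher mirrors the constructor-returns-interface shape of
// NewOOMWatcher, minus the cadvisor.Interface and record.EventRecorder params.
func newOOMWatcher() oomWatcher { return &fakeWatcher{} }

func main() {
	w := newOOMWatcher()
	if err := w.Start("node/worker-1"); err != nil {
		fmt.Println("failed to start OOM watcher:", err)
	}
}
```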

View File

@ -59,8 +59,8 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
} }
// getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name inferred from filterContainerId (if not empty), ordered by the creation time from the latest to the earliest. // getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name inferred from filterContainerId (if not empty), ordered by the creation time from the latest to the earliest.
// If filterContainerId is empty, all dead containers in the pod are returned. // If filterContainerID is empty, all dead containers in the pod are returned.
func getContainersToDeleteInPod(filterContainerId string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList { func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.ContainerStatus { matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.ContainerStatus {
if filterContainerId == "" { if filterContainerId == "" {
return nil return nil
@ -71,10 +71,10 @@ func getContainersToDeleteInPod(filterContainerId string, podStatus *kubecontain
} }
} }
return nil return nil
}(filterContainerId, podStatus) }(filterContainerID, podStatus)
if filterContainerId != "" && matchedContainer == nil { if filterContainerID != "" && matchedContainer == nil {
glog.Warningf("Container %q not found in pod's containers", filterContainerId) glog.Warningf("Container %q not found in pod's containers", filterContainerID)
return containerStatusbyCreatedList{} return containerStatusbyCreatedList{}
} }
@ -97,13 +97,13 @@ func getContainersToDeleteInPod(filterContainerId string, podStatus *kubecontain
} }
// deleteContainersInPod issues container deletion requests for containers selected by getContainersToDeleteInPod. // deleteContainersInPod issues container deletion requests for containers selected by getContainersToDeleteInPod.
func (p *podContainerDeletor) deleteContainersInPod(filterContainerId string, podStatus *kubecontainer.PodStatus, removeAll bool) { func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, removeAll bool) {
containersToKeep := p.containersToKeep containersToKeep := p.containersToKeep
if removeAll { if removeAll {
containersToKeep = 0 containersToKeep = 0
} }
for _, candidate := range getContainersToDeleteInPod(filterContainerId, podStatus, containersToKeep) { for _, candidate := range getContainersToDeleteInPod(filterContainerID, podStatus, containersToKeep) {
select { select {
case p.worker <- candidate.ID: case p.worker <- candidate.ID:
default: default:
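Beyond the filterContainerId → filterContainerID rename, the flow above is: resolve the container name behind the filter ID (if any), collect the pod's dead containers with that name, and delete everything but the newest containersToKeep. A simplified, self-contained version of that flow, with toy types in place of the kubecontainer package (ordering and matching details are reduced for the example):

```go
package main

import (
	"fmt"
	"sort"
)

// containerStatus is a reduced kubecontainer.ContainerStatus.
type containerStatus struct {
	ID       string
	Name     string
	ExitedAt int64 // unix seconds; newer is larger
	Running  bool
}

// containersToDelete mirrors getContainersToDeleteInPod: optionally narrow to
// the name behind filterContainerID, drop running containers, sort newest
// first, and keep the newest containersToKeep.
func containersToDelete(filterContainerID string, statuses []containerStatus, containersToKeep int) []containerStatus {
	name := ""
	if filterContainerID != "" {
		for _, s := range statuses {
			if s.ID == filterContainerID {
				name = s.Name
				break
			}
		}
		if name == "" {
			// Matches the warning path in the diff: unknown filter ID, delete nothing.
			return nil
		}
	}

	var exited []containerStatus
	for _, s := range statuses {
		if s.Running {
			continue
		}
		if name != "" && s.Name != name {
			continue
		}
		exited = append(exited, s)
	}
	sort.Slice(exited, func(i, j int) bool { return exited[i].ExitedAt > exited[j].ExitedAt })
	if len(exited) <= containersToKeep {
		return nil
	}
	return exited[containersToKeep:]
}

func main() {
	statuses := []containerStatus{
		{ID: "c1", Name: "app", ExitedAt: 100},
		{ID: "c2", Name: "app", ExitedAt: 200},
		{ID: "c3", Name: "app", Running: true},
	}
	fmt.Println(containersToDelete("c2", statuses, 1)) // keeps c2, deletes c1
}
```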

View File

@ -185,7 +185,7 @@ func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
} }
testCases := []struct { testCases := []struct {
filterId string filterID string
expectedContainersToDelete containerStatusbyCreatedList expectedContainersToDelete containerStatusbyCreatedList
}{ }{
{ {
@ -195,7 +195,7 @@ func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
} }
for _, test := range testCases { for _, test := range testCases {
candidates := getContainersToDeleteInPod(test.filterId, &pod, len(pod.ContainerStatuses)) candidates := getContainersToDeleteInPod(test.filterID, &pod, len(pod.ContainerStatuses))
if !reflect.DeepEqual(candidates, test.expectedContainersToDelete) { if !reflect.DeepEqual(candidates, test.expectedContainersToDelete) {
t.Errorf("expected %v got %v", test.expectedContainersToDelete, candidates) t.Errorf("expected %v got %v", test.expectedContainersToDelete, candidates)
} }

View File

@ -292,12 +292,12 @@ func TestFakePodWorkers(t *testing.T) {
&v1.Pod{}, &v1.Pod{},
}, },
{ {
podWithUidNameNs("12345678", "foo", "new"), podWithUIDNameNs("12345678", "foo", "new"),
podWithUidNameNs("12345678", "fooMirror", "new"), podWithUIDNameNs("12345678", "fooMirror", "new"),
}, },
{ {
podWithUidNameNs("98765", "bar", "new"), podWithUIDNameNs("98765", "bar", "new"),
podWithUidNameNs("98765", "barMirror", "new"), podWithUIDNameNs("98765", "barMirror", "new"),
}, },
} }

View File

@ -40,10 +40,10 @@ type ReasonCache struct {
cache *lru.Cache cache *lru.Cache
} }
// reasonInfo is the cached item in ReasonCache // reasonItem is the cached item in ReasonCache
type reasonInfo struct { type reasonItem struct {
reason error Err error
message string Message string
} }
// maxReasonCacheEntries is the cache entry number in lru cache. 1000 is a proper number // maxReasonCacheEntries is the cache entry number in lru cache. 1000 is a proper number
@ -51,6 +51,7 @@ type reasonInfo struct {
// may want to increase the number. // may want to increase the number.
const maxReasonCacheEntries = 1000 const maxReasonCacheEntries = 1000
// NewReasonCache creates an instance of 'ReasonCache'.
func NewReasonCache() *ReasonCache { func NewReasonCache() *ReasonCache {
return &ReasonCache{cache: lru.New(maxReasonCacheEntries)} return &ReasonCache{cache: lru.New(maxReasonCacheEntries)}
} }
@ -63,7 +64,7 @@ func (c *ReasonCache) composeKey(uid types.UID, name string) string {
func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) { func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) {
c.lock.Lock() c.lock.Lock()
defer c.lock.Unlock() defer c.lock.Unlock()
c.cache.Add(c.composeKey(uid, name), reasonInfo{reason, message}) c.cache.Add(c.composeKey(uid, name), reasonItem{reason, message})
} }
// Update updates the reason cache with the SyncPodResult. Only SyncResult with // Update updates the reason cache with the SyncPodResult. Only SyncResult with
@ -92,13 +93,13 @@ func (c *ReasonCache) Remove(uid types.UID, name string) {
// Get gets error reason from the cache. The return values are error reason, error message and // Get gets error reason from the cache. The return values are error reason, error message and
// whether an error reason is found in the cache. If no error reason is found, empty string will // whether an error reason is found in the cache. If no error reason is found, empty string will
// be returned for error reason and error message. // be returned for error reason and error message.
func (c *ReasonCache) Get(uid types.UID, name string) (error, string, bool) { func (c *ReasonCache) Get(uid types.UID, name string) (*reasonItem, bool) {
c.lock.Lock() c.lock.Lock()
defer c.lock.Unlock() defer c.lock.Unlock()
value, ok := c.cache.Get(c.composeKey(uid, name)) value, ok := c.cache.Get(c.composeKey(uid, name))
if !ok { if !ok {
return nil, "", ok return nil, false
} }
info := value.(reasonInfo) info := value.(reasonItem)
return info.reason, info.message, ok return &info, true
} }
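The Get change above replaces the (error, string, bool) tuple with a (*reasonItem, bool) pair, which sidesteps golint's complaint about error leading a multi-value return and keeps the two cached fields together. A sketch of the new shape from a caller's point of view, using a plain map in place of the LRU cache (simplified; not the real implementation):

```go
package main

import (
	"errors"
	"fmt"
)

// reasonItem mirrors the struct in the diff: the error and its human-readable
// message are cached together and returned as one value.
type reasonItem struct {
	Err     error
	Message string
}

// reasonCache is a map-backed stand-in for ReasonCache's LRU.
type reasonCache struct {
	items map[string]reasonItem
}

func newReasonCache() *reasonCache { return &reasonCache{items: map[string]reasonItem{}} }

func (c *reasonCache) add(uid, name string, reason error, message string) {
	c.items[uid+"_"+name] = reasonItem{reason, message}
}

// Get follows the new signature: a single *reasonItem plus a found flag,
// instead of (error, string, bool).
func (c *reasonCache) Get(uid, name string) (*reasonItem, bool) {
	v, ok := c.items[uid+"_"+name]
	if !ok {
		return nil, false
	}
	return &v, true
}

func main() {
	c := newReasonCache()
	c.add("12345678", "app", errors.New("ImagePullBackOff"), "back-off pulling image")
	if info, ok := c.Get("12345678", "app"); ok {
		// Callers now read both fields from the single returned item,
		// as the updated test in the next hunk does.
		fmt.Println(info.Err, "-", info.Message)
	}
}
```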

View File

@ -51,9 +51,9 @@ func TestReasonCache(t *testing.T) {
func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *kubecontainer.SyncResult, found bool) { func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *kubecontainer.SyncResult, found bool) {
name := result.Target.(string) name := result.Target.(string)
actualReason, actualMessage, ok := cache.Get(uid, name) actualReason, ok := cache.Get(uid, name)
if ok && !found { if ok && !found {
t.Fatalf("unexpected cache hit: %v, %q", actualReason, actualMessage) t.Fatalf("unexpected cache hit: %v, %q", actualReason.Err, actualReason.Message)
} }
if !ok && found { if !ok && found {
t.Fatalf("corresponding reason info not found") t.Fatalf("corresponding reason info not found")
@ -63,7 +63,7 @@ func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *k
} }
reason := result.Error reason := result.Error
message := result.Message message := result.Message
if actualReason != reason || actualMessage != message { if actualReason.Err != reason || actualReason.Message != message {
t.Errorf("expected %v %q, got %v %q", reason, message, actualReason, actualMessage) t.Errorf("expected %v %q, got %v %q", reason, message, actualReason.Err, actualReason.Message)
} }
} }

View File

@ -35,6 +35,7 @@ const (
runOnceRetryDelayBackoff = 2 runOnceRetryDelayBackoff = 2
) )
// RunPodResult defines the running results of a Pod.
type RunPodResult struct { type RunPodResult struct {
Pod *v1.Pod Pod *v1.Pod
Err error Err error

View File

@ -44,7 +44,7 @@ import (
type HollowKubelet struct { type HollowKubelet struct {
KubeletFlags *options.KubeletFlags KubeletFlags *options.KubeletFlags
KubeletConfiguration *componentconfig.KubeletConfiguration KubeletConfiguration *componentconfig.KubeletConfiguration
KubeletDeps *kubelet.KubeletDeps KubeletDeps *kubelet.Dependencies
} }
func NewHollowKubelet( func NewHollowKubelet(
@ -66,7 +66,7 @@ func NewHollowKubelet(
// ----------------- // -----------------
volumePlugins := empty_dir.ProbeVolumePlugins() volumePlugins := empty_dir.ProbeVolumePlugins()
volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...) volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...)
d := &kubelet.KubeletDeps{ d := &kubelet.Dependencies{
KubeClient: client, KubeClient: client,
DockerClient: dockerClient, DockerClient: dockerClient,
CAdvisorInterface: cadvisorInterface, CAdvisorInterface: cadvisorInterface,
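The HollowKubelet changes are mechanical fallout of the kubelet.KubeletDeps → kubelet.Dependencies rename; golint flags exported names that repeat their package name, since callers already qualify them. A stand-in sketch of the construction pattern in the hunk, with placeholder field types instead of the real client and cAdvisor interfaces (names below are illustrative):

```go
package main

import "fmt"

// dependencies stands in for kubelet.Dependencies; only the fields visible in
// the hunk above are mirrored, and their types are simplified to interface{}.
type dependencies struct {
	KubeClient        interface{}
	DockerClient      interface{}
	CAdvisorInterface interface{}
}

// hollowKubelet mirrors the HollowKubelet wiring: it simply holds the bundle.
type hollowKubelet struct {
	deps *dependencies
}

func newHollowKubelet(client, docker, cadvisor interface{}) *hollowKubelet {
	return &hollowKubelet{deps: &dependencies{
		KubeClient:        client,
		DockerClient:      docker,
		CAdvisorInterface: cadvisor,
	}}
}

func main() {
	k := newHollowKubelet(nil, nil, nil)
	fmt.Printf("%+v\n", *k.deps)
}
```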