mirror of https://github.com/k3s-io/k3s
Merge pull request #47316 from k82cn/k8s_47315
Automatic merge from submit-queue (batch tested with PRs 48981, 47316, 49180)

Added golint check for pkg/kubelet.

**What this PR does / why we need it**: Added a golint check for pkg/kubelet and made golint happy.

**Which issue this PR fixes**: fixes #47315

**Release note**:
```release-note-none
```
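The rest of this page is the diff itself. Nearly every hunk applies one of three golint conventions: a doc comment on an exported identifier must begin with that identifier's name, common initialisms (API, UID, ID) are written in full caps, and names that stutter with the package (kubelet.KubeletDeps, kubelet.KubeletBootstrap) are shortened. A small illustrative sketch of the doc-comment rule — the names below are hypothetical and not taken from pkg/kubelet:

```go
package example

// Before this kind of cleanup, golint reports:
//   "comment on exported const MaxRetries should be of the form 'MaxRetries ...'"
// The fix is simply to lead the comment with the exported name.

// MaxRetries is the maximum number of retries before the operation is aborted.
const MaxRetries = 5
```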
commit c0287ce420
@@ -126,9 +126,9 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
 	return cmd
 }

-// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
+// UnsecuredDependencies returns a Dependencies suitable for being run, or an error if the server setup
 // is not valid. It will not start any background processes, and does not include authentication/authorization
-func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {
+func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, error) {
 	// Initialize the TLS Options
 	tlsOptions, err := InitializeTLS(&s.KubeletFlags, &s.KubeletConfiguration)
 	if err != nil {
@@ -151,7 +151,7 @@ func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error
 		dockerClient = nil
 	}

-	return &kubelet.KubeletDeps{
+	return &kubelet.Dependencies{
 		Auth: nil, // default does not enforce auth[nz]
 		CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
 		Cloud: nil, // cloud provider might start background processes
@@ -182,7 +182,7 @@ func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) {
 }

 // Tries to download the kubelet-<node-name> configmap from "kube-system" namespace via the API server and returns a JSON string or error
-func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (string, error) {
+func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (string, error) {
 	// TODO(mtaufen): should probably cache clientset and pass into this function rather than regenerate on every request
 	kubeClient, err := getKubeClient(s)
 	if err != nil {
@@ -281,11 +281,11 @@ func initKubeletConfigSync(s *options.KubeletServer) (*componentconfig.KubeletCo
 	}
 }

-// Run runs the specified KubeletServer with the given KubeletDeps. This should never exit.
+// Run runs the specified KubeletServer with the given Dependencies. This should never exit.
 // The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer.
-// Otherwise, the caller is assumed to have set up the KubeletDeps object and a default one will
+// Otherwise, the caller is assumed to have set up the Dependencies object and a default one will
 // not be generated.
-func Run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) error {
+func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) error {
 	if err := run(s, kubeDeps); err != nil {
 		return fmt.Errorf("failed to run Kubelet: %v", err)
 	}
@@ -339,7 +339,7 @@ func validateConfig(s *options.KubeletServer) error {
 }

 // makeEventRecorder sets up kubeDeps.Recorder if its nil. Its a no-op otherwise.
-func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, nodeName types.NodeName) {
+func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, nodeName types.NodeName) {
 	if kubeDeps.Recorder != nil {
 		return
 	}
@@ -354,7 +354,7 @@ func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubele
 	}
 }

-func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
+func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 	// TODO: this should be replaced by a --standalone flag
 	standaloneMode := (len(s.APIServerList) == 0 && !s.RequireKubeConfig)

@@ -417,7 +417,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
 	}

 	if kubeDeps == nil {
-		kubeDeps, err = UnsecuredKubeletDeps(s)
+		kubeDeps, err = UnsecuredDependencies(s)
 		if err != nil {
 			return err
 		}
@@ -817,7 +817,7 @@ func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config)
 // 2 Kubelet binary
 // 3 Standalone 'kubernetes' binary
 // Eventually, #2 will be replaced with instances of #3
-func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, runOnce bool, standaloneMode bool) error {
+func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, runOnce bool, standaloneMode bool) error {
 	hostname := nodeutil.GetHostname(kubeFlags.HostnameOverride)
 	// Query the cloud provider for our node name, default to hostname if kcfg.Cloud == nil
 	nodeName, err := getNodeName(kubeDeps.Cloud, hostname)
@@ -891,7 +891,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.Kubele
 	return nil
 }

-func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps) {
+func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies) {
 	// start the kubelet
 	go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)

@@ -908,7 +908,7 @@ func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg
 	}
 }

-func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.KubeletBootstrap, err error) {
+func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.Bootstrap, err error) {
 	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
 	// up into "per source" synchronizations

@@ -38,6 +38,7 @@ type diskSpaceManager interface {
 	IsRuntimeDiskSpaceAvailable() (bool, error)
 }

+// DiskSpacePolicy defines the free disk for Docker and Root.
 type DiskSpacePolicy struct {
 	// free disk space threshold for filesystem holding docker images.
 	DockerFreeDiskMB int
@@ -112,10 +113,10 @@ func (dm *realDiskSpaceManager) isSpaceAvailable(fsType string, threshold int, f

 func validatePolicy(policy DiskSpacePolicy) error {
 	if policy.DockerFreeDiskMB < 0 {
-		return fmt.Errorf("free disk space should be non-negative. Invalid value %d for docker disk space threshold.", policy.DockerFreeDiskMB)
+		return fmt.Errorf("free disk space should be non-negative; invalid value %d for docker disk space threshold", policy.DockerFreeDiskMB)
 	}
 	if policy.RootFreeDiskMB < 0 {
-		return fmt.Errorf("free disk space should be non-negative. Invalid value %d for root disk space threshold.", policy.RootFreeDiskMB)
+		return fmt.Errorf("free disk space should be non-negative; invalid value %d for root disk space threshold", policy.RootFreeDiskMB)
 	}
 	return nil
 }
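The two fmt.Errorf changes above also follow the Go error-string convention that golint checks: messages start lowercase and carry no trailing punctuation, because callers usually wrap them into longer messages. A minimal runnable sketch of why that composes better (the values are made up):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Lowercase, unpunctuated error strings read naturally once wrapped.
	base := errors.New("free disk space should be non-negative; invalid value -1 for docker disk space threshold")
	fmt.Println(fmt.Errorf("disk space manager rejected policy: %v", base))
}
```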
@@ -116,10 +116,10 @@ const (
 	// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
 	nodeStatusUpdateRetry = 5

-	// Location of container logs.
+	// ContainerLogsDir is the location of container logs.
 	ContainerLogsDir = "/var/log/containers"

-	// max backoff period, exported for the e2e test
+	// MaxContainerBackOff is the max backoff period, exported for the e2e test
 	MaxContainerBackOff = 300 * time.Second

 	// Capacity of the channel for storing pods to kill. A small number should
@@ -156,9 +156,9 @@ const (
 	// container restarts and image pulls.
 	backOffPeriod = time.Second * 10

-	// Period for performing container garbage collection.
+	// ContainerGCPeriod is the period for performing container garbage collection.
 	ContainerGCPeriod = time.Minute
-	// Period for performing image garbage collection.
+	// ImageGCPeriod is the period for performing image garbage collection.
 	ImageGCPeriod = 5 * time.Minute

 	// Minimum number of dead containers to keep in a pod
@@ -178,8 +178,8 @@ type SyncHandler interface {
 // Option is a functional option type for Kubelet
 type Option func(*Kubelet)

-// bootstrapping interface for kubelet, targets the initialization protocol
-type KubeletBootstrap interface {
+// Bootstrap is a bootstrapping interface for kubelet, targets the initialization protocol
+type Bootstrap interface {
 	GetConfiguration() componentconfig.KubeletConfiguration
 	BirthCry()
 	StartGarbageCollection()
@@ -189,13 +189,13 @@ type KubeletBootstrap interface {
 	RunOnce(<-chan kubetypes.PodUpdate) ([]RunPodResult, error)
 }

-// create and initialize a Kubelet instance
-type KubeletBuilder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (KubeletBootstrap, error)
+// Builder creates and initializes a Kubelet instance
+type Builder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (Bootstrap, error)

-// KubeletDeps is a bin for things we might consider "injected dependencies" -- objects constructed
+// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
 // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
 // these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
-type KubeletDeps struct {
+type Dependencies struct {
 	// TODO(mtaufen): KubeletBuilder:
 	// Mesos currently uses this as a hook to let them make their own call to
 	// let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with
@@ -203,7 +203,7 @@ type KubeletDeps struct {
 	// a nice home for it would be. There seems to be a trend, between this and
 	// the Options fields below, of providing hooks where you can add extra functionality
 	// to the Kubelet for your solution. Maybe we should centralize these sorts of things?
-	Builder KubeletBuilder
+	Builder Builder

 	// TODO(mtaufen): ContainerRuntimeOptions and Options:
 	// Arrays of functions that can do arbitrary things to the Kubelet and the Runtime
@@ -240,7 +240,7 @@ type KubeletDeps struct {

 // makePodSourceConfig creates a config.PodConfig from the given
 // KubeletConfiguration or returns an error.
-func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName types.NodeName) (*config.PodConfig, error) {
+func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) {
 	manifestURLHeader := make(http.Header)
 	if kubeCfg.ManifestURLHeader != "" {
 		pieces := strings.Split(kubeCfg.ManifestURLHeader, ":")
@@ -285,7 +285,7 @@ func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (i

 // NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
 // No initialization of Kubelet and its modules should happen here.
-func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) {
+func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) {
 	if kubeCfg.RootDirectory == "" {
 		return nil, fmt.Errorf("invalid root directory %q", kubeCfg.RootDirectory)
 	}
@@ -514,11 +514,11 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 		glog.Warningf("Failed to close iptables lock file: %v", err)
 	}

-	if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU)); err != nil {
+	plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU))
+	if err != nil {
 		return nil, err
-	} else {
-		klet.networkPlugin = plug
 	}
+	klet.networkPlugin = plug

 	machineInfo, err := klet.GetCachedMachineInfo()
 	if err != nil {
@@ -698,14 +698,15 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 		var ips []net.IP
 		cfgAddress := net.ParseIP(kubeCfg.Address)
 		if cfgAddress == nil || cfgAddress.IsUnspecified() {
-			if localIPs, err := allLocalIPsWithoutLoopback(); err != nil {
+			localIPs, err := allLocalIPsWithoutLoopback()
+			if err != nil {
 				return nil, err
-			} else {
-				ips = localIPs
 			}
+			ips = localIPs
 		} else {
 			ips = []net.IP{cfgAddress}
 		}
+
 		ips = append(ips, cloudIPs...)
 		names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...)
 		klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names)
@@ -1118,7 +1119,7 @@ func allLocalIPsWithoutLoopback() ([]net.IP, error) {
 	for _, i := range interfaces {
 		addresses, err := i.Addrs()
 		if err != nil {
-			return nil, fmt.Errorf("could not list the addresses for network interface %v: %v\n", i, err)
+			return nil, fmt.Errorf("could not list the addresses for network interface %v: %v", i, err)
 		}
 		for _, address := range addresses {
 			switch v := address.(type) {
@@ -1150,7 +1151,7 @@ func (kl *Kubelet) setupDataDirs() error {
 	return nil
 }

-// Starts garbage collection threads.
+// StartGarbageCollection starts garbage collection threads.
 func (kl *Kubelet) StartGarbageCollection() {
 	loggedContainerGCFailure := false
 	go wait.Until(func() {
@@ -2128,10 +2129,10 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
 }

 // Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
-func (kl *Kubelet) cleanUpContainersInPod(podId types.UID, exitedContainerID string) {
-	if podStatus, err := kl.podCache.Get(podId); err == nil {
+func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) {
+	if podStatus, err := kl.podCache.Get(podID); err == nil {
 		removeAll := false
-		if syncedPod, ok := kl.podManager.GetPodByUID(podId); ok {
+		if syncedPod, ok := kl.podManager.GetPodByUID(podID); ok {
 			// When an evicted pod has already synced, all containers can be removed.
 			removeAll = eviction.PodIsEvicted(syncedPod.Status)
 		}
@@ -2146,7 +2147,7 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
 }

 // Gets the streaming server configuration to use with in-process CRI shims.
-func getStreamingConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps) *streaming.Config {
+func getStreamingConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies) *streaming.Config {
 	config := &streaming.Config{
 		// Use a relative redirect (no scheme or host).
 		BaseURL: &url.URL{
@@ -61,19 +61,19 @@ func (kl *Kubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
 	return kl.cadvisor.RootFsInfo()
 }

-// Returns stats (from Cadvisor) for a non-Kubernetes container.
+// GetRawContainerInfo returns stats (from Cadvisor) for a non-Kubernetes container.
 func (kl *Kubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
 	if subcontainers {
 		return kl.cadvisor.SubcontainerInfo(containerName, req)
-	} else {
-		containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
-		if err != nil {
-			return nil, err
-		}
-		return map[string]*cadvisorapi.ContainerInfo{
-			containerInfo.Name: containerInfo,
-		}, nil
 	}
+
+	containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
+	if err != nil {
+		return nil, err
+	}
+	return map[string]*cadvisorapi.ContainerInfo{
+		containerInfo.Name: containerInfo,
+	}, nil
 }

 // GetVersionInfo returns information about the version of cAdvisor in use.
@@ -30,7 +30,7 @@ import (
 )

 func TestGetContainerInfo(t *testing.T) {
-	cadvisorApiFailure := fmt.Errorf("cAdvisor failure")
+	cadvisorAPIFailure := fmt.Errorf("cAdvisor failure")
 	runtimeError := fmt.Errorf("List containers error")
 	tests := []struct {
 		name string
@@ -40,7 +40,7 @@ func TestGetContainerInfo(t *testing.T) {
 		runtimeError error
 		podList []*kubecontainertest.FakePod
 		requestedPodFullName string
-		requestedPodUid types.UID
+		requestedPodUID types.UID
 		requestedContainerName string
 		expectDockerContainerCall bool
 		mockError error
@@ -73,7 +73,7 @@ func TestGetContainerInfo(t *testing.T) {
 				},
 			},
 			requestedPodFullName: "qux_ns",
-			requestedPodUid: "",
+			requestedPodUID: "",
 			requestedContainerName: "foo",
 			expectDockerContainerCall: true,
 			mockError: nil,
@@ -102,11 +102,11 @@ func TestGetContainerInfo(t *testing.T) {
 				},
 			},
 			requestedPodFullName: "qux_ns",
-			requestedPodUid: "uuid",
+			requestedPodUID: "uuid",
 			requestedContainerName: "foo",
 			expectDockerContainerCall: true,
-			mockError: cadvisorApiFailure,
-			expectedError: cadvisorApiFailure,
+			mockError: cadvisorAPIFailure,
+			expectedError: cadvisorAPIFailure,
 			expectStats: false,
 		},
 		{
@@ -117,7 +117,7 @@ func TestGetContainerInfo(t *testing.T) {
 			runtimeError: nil,
 			podList: []*kubecontainertest.FakePod{},
 			requestedPodFullName: "qux",
-			requestedPodUid: "",
+			requestedPodUID: "",
 			requestedContainerName: "foo",
 			expectDockerContainerCall: false,
 			mockError: nil,
@@ -132,7 +132,7 @@ func TestGetContainerInfo(t *testing.T) {
 			runtimeError: runtimeError,
 			podList: []*kubecontainertest.FakePod{},
 			requestedPodFullName: "qux",
-			requestedPodUid: "",
+			requestedPodUID: "",
 			requestedContainerName: "foo",
 			mockError: nil,
 			expectedError: runtimeError,
@@ -146,7 +146,7 @@ func TestGetContainerInfo(t *testing.T) {
 			runtimeError: nil,
 			podList: []*kubecontainertest.FakePod{},
 			requestedPodFullName: "qux_ns",
-			requestedPodUid: "",
+			requestedPodUID: "",
 			requestedContainerName: "foo",
 			mockError: nil,
 			expectedError: kubecontainer.ErrContainerNotFound,
@@ -174,7 +174,7 @@ func TestGetContainerInfo(t *testing.T) {
 				},
 			},
 			requestedPodFullName: "qux_ns",
-			requestedPodUid: "",
+			requestedPodUID: "",
 			requestedContainerName: "foo",
 			mockError: nil,
 			expectedError: kubecontainer.ErrContainerNotFound,
@@ -195,7 +195,7 @@ func TestGetContainerInfo(t *testing.T) {
 		fakeRuntime.Err = tc.runtimeError
 		fakeRuntime.PodList = tc.podList

-		stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUid, tc.requestedContainerName, cadvisorReq)
+		stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq)
 		assert.Equal(t, tc.expectedError, err)

 		if tc.expectStats {
@@ -214,7 +214,7 @@ func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
 	return kl.containerManager.GetNodeConfig()
 }

-// Returns host IP or nil in case of error.
+// GetHostIP returns host IP or nil in case of error.
 func (kl *Kubelet) GetHostIP() (net.IP, error) {
 	node, err := kl.GetNode()
 	if err != nil {
@@ -31,17 +31,17 @@ import (
 )

 const (
-	// the mark-for-masquerade chain
+	// KubeMarkMasqChain is the mark-for-masquerade chain
 	// TODO: clean up this logic in kube-proxy
 	KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"

-	// the mark-for-drop chain
+	// KubeMarkDropChain is the mark-for-drop chain
 	KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"

-	// kubernetes postrouting rules
+	// KubePostroutingChain is kubernetes postrouting rules
 	KubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"

-	// kubernetes firewall rules
+	// KubeFirewallChain is kubernetes firewall rules
 	KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
 )

@@ -118,7 +118,7 @@ func TestComposeDNSSearch(t *testing.T) {
 	recorder := record.NewFakeRecorder(20)
 	kubelet.recorder = recorder

-	pod := podWithUidNameNs("", "test_pod", "testNS")
+	pod := podWithUIDNameNs("", "test_pod", "testNS")
 	kubelet.clusterDomain = "TEST"

 	testCases := []struct {
@@ -58,10 +58,10 @@ const (
 	maxNamesPerImageInNodeStatus = 5
 )

-// registerWithApiServer registers the node with the cluster master. It is safe
+// registerWithAPIServer registers the node with the cluster master. It is safe
 // to call multiple times, but not concurrently (kl.registrationCompleted is
 // not locked).
-func (kl *Kubelet) registerWithApiServer() {
+func (kl *Kubelet) registerWithAPIServer() {
 	if kl.registrationCompleted {
 		return
 	}
@@ -81,7 +81,7 @@ func (kl *Kubelet) registerWithApiServer() {
 		}

 		glog.Infof("Attempting to register node %s", node.Name)
-		registered := kl.tryRegisterWithApiServer(node)
+		registered := kl.tryRegisterWithAPIServer(node)
 		if registered {
 			glog.Infof("Successfully registered node %s", node.Name)
 			kl.registrationCompleted = true
@@ -90,14 +90,14 @@ func (kl *Kubelet) registerWithApiServer() {
 	}
 }

-// tryRegisterWithApiServer makes an attempt to register the given node with
+// tryRegisterWithAPIServer makes an attempt to register the given node with
 // the API server, returning a boolean indicating whether the attempt was
 // successful. If a node with the same name already exists, it reconciles the
 // value of the annotation for controller-managed attach-detach of attachable
 // persistent volumes for the node. If a node of the same name exists but has
 // a different externalID value, it attempts to delete that node so that a
 // later attempt can recreate it.
-func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
+func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
 	_, err := kl.kubeClient.Core().Nodes().Create(node)
 	if err == nil {
 		return true
@@ -344,7 +344,7 @@ func (kl *Kubelet) syncNodeStatus() {
 	}
 	if kl.registerNode {
 		// This will exit immediately if it doesn't need to do anything.
-		kl.registerWithApiServer()
+		kl.registerWithAPIServer()
 	}
 	if err := kl.updateNodeStatus(); err != nil {
 		glog.Errorf("Unable to update node status: %v", err)
@@ -499,11 +499,10 @@ func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
 		if ipAddr == nil {
 			// We tried everything we could, but the IP address wasn't fetchable; error out
 			return fmt.Errorf("can't get ip address of node %s. error: %v", node.Name, err)
-		} else {
-			node.Status.Addresses = []v1.NodeAddress{
-				{Type: v1.NodeInternalIP, Address: ipAddr.String()},
-				{Type: v1.NodeHostName, Address: kl.GetHostname()},
-			}
 		}
+		node.Status.Addresses = []v1.NodeAddress{
+			{Type: v1.NodeInternalIP, Address: ipAddr.String()},
+			{Type: v1.NodeHostName, Address: kl.GetHostname()},
+		}
 	}
 	return nil
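registerWithApiServer, tryRegisterWithApiServer, requestedPodUid, filterContainerId, and similar names are rewritten throughout this commit because golint expects common initialisms to keep a uniform case: API, UID, and ID rather than Api, Uid, and Id. A tiny illustrative example with invented names:

```go
package main

import "fmt"

// nodeRegistration uses the golint-preferred spellings of the initialisms.
type nodeRegistration struct {
	NodeUID string
	APIHost string
}

func main() {
	r := nodeRegistration{NodeUID: "12345678", APIHost: "https://127.0.0.1:443"}
	fmt.Printf("registering node %s with %s\n", r.NodeUID, r.APIHost)
}
```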
@@ -954,7 +954,7 @@ func TestRegisterWithApiServer(t *testing.T) {

 	done := make(chan struct{})
 	go func() {
-		kubelet.registerWithApiServer()
+		kubelet.registerWithAPIServer()
 		done <- struct{}{}
 	}()
 	select {
@@ -1113,7 +1113,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 			return notImplemented(action)
 		})

-		result := kubelet.tryRegisterWithApiServer(tc.newNode)
+		result := kubelet.tryRegisterWithAPIServer(tc.newNode)
 		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)

 		actions := kubeClient.Actions()
@@ -381,7 +381,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
 	}
 	services, err := kl.serviceLister.List(labels.Everything())
 	if err != nil {
-		return m, fmt.Errorf("failed to list services when setting up env vars.")
+		return m, fmt.Errorf("failed to list services when setting up env vars")
 	}

 	// project the services in namespace ns onto the master services
@@ -546,7 +546,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
 				return result, err
 			}
 		case envVar.ValueFrom.ResourceFieldRef != nil:
-			defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
+			defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardAPI(pod, container)
 			if err != nil {
 				return result, err
 			}
@@ -669,9 +669,8 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co
 	containerName := fs.ContainerName
 	if len(containerName) == 0 {
 		return resource.ExtractContainerResourceValue(fs, container)
-	} else {
-		return resource.ExtractResourceValueByContainerName(fs, pod, containerName)
 	}
+	return resource.ExtractResourceValueByContainerName(fs, pod, containerName)
 }

 // One of the following arguments must be non-nil: runningPod, status.
@@ -1353,7 +1352,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			continue
 		}
 		status := statuses[container.Name]
-		reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
+		reason, ok := kl.reasonCache.Get(pod.UID, container.Name)
 		if !ok {
 			// In fact, we could also apply Waiting state here, but it is less informative,
 			// and the container will be restarted soon, so we prefer the original state here.
@@ -1368,8 +1367,8 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 		}
 		status.State = v1.ContainerState{
 			Waiting: &v1.ContainerStateWaiting{
-				Reason: reason.Error(),
-				Message: message,
+				Reason: reason.Err.Error(),
+				Message: reason.Message,
 			},
 		}
 		statuses[container.Name] = status
@@ -1390,7 +1389,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 	return containerStatuses
 }

-// Returns logs of current machine.
+// ServeLogs returns logs of current machine.
 func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
 	// TODO: whitelist logs we are willing to serve
 	kl.logServer.ServeHTTP(w, req)
@@ -1408,7 +1407,7 @@ func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, container
 	return pod.FindContainerByName(containerName), nil
 }

-// Run a command in a container, returns the combined stdout, stderr as an array of bytes
+// RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes
 func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
 	container, err := kl.findContainer(podFullName, podUID, containerName)
 	if err != nil {
@@ -1723,7 +1723,7 @@ func TestExec(t *testing.T) {
 		tty = true
 	)
 	var (
-		podFullName = kubecontainer.GetPodFullName(podWithUidNameNs(podUID, podName, podNamespace))
+		podFullName = kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, podName, podNamespace))
 		command = []string{"ls"}
 		stdin = &bytes.Buffer{}
 		stdout = &fakeReadWriteCloser{}
@@ -1859,7 +1859,7 @@ func TestPortForward(t *testing.T) {
 			}},
 		}

-		podFullName := kubecontainer.GetPodFullName(podWithUidNameNs(podUID, tc.podName, podNamespace))
+		podFullName := kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, tc.podName, podNamespace))
 		{ // No streaming case
 			description := "no streaming - " + tc.description
 			redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{})
@@ -26,14 +26,14 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1/resource"
 )

-// defaultPodLimitsForDownwardApi copies the input pod, and optional container,
+// defaultPodLimitsForDownwardAPI copies the input pod, and optional container,
 // and applies default resource limits. it returns a copy of the input pod,
 // and a copy of the input container (if specified) with default limits
 // applied. if a container has no limit specified, it will default the limit to
 // the node allocatable.
 // TODO: if/when we have pod level resources, we need to update this function
 // to use those limits instead of node allocatable.
-func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
+func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
 	if pod == nil {
 		return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
 	}
@@ -79,7 +79,7 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
 	}
 	as := assert.New(t)
 	for idx, tc := range cases {
-		actual, _, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod, nil)
+		actual, _, err := tk.kubelet.defaultPodLimitsForDownwardAPI(tc.pod, nil)
 		as.Nil(err, "failed to default pod limits: %v", err)
 		if !apiequality.Semantic.DeepEqual(tc.expected, actual) {
 			as.Fail("test case [%d] failed. Expected: %+v, Got: %+v", idx, tc.expected, actual)
@@ -356,7 +356,7 @@ func TestSyncPodsStartPod(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	fakeRuntime := testKubelet.fakeRuntime
 	pods := []*v1.Pod{
-		podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+		podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 			Containers: []v1.Container{
 				{Name: "bar"},
 			},
@@ -443,8 +443,8 @@ func TestHandlePortConflicts(t *testing.T) {

 	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
 	pods := []*v1.Pod{
-		podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
-		podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
+		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
+		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
 	}
 	// Make sure the Pods are in the reverse order of creation time.
 	pods[1].CreationTimestamp = metav1.NewTime(time.Now())
@@ -488,8 +488,8 @@ func TestHandleHostNameConflicts(t *testing.T) {

 	// default NodeName in test is 127.0.0.1
 	pods := []*v1.Pod{
-		podWithUidNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
-		podWithUidNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
+		podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
+		podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
 	}

 	notfittingPod := pods[0]
@@ -528,8 +528,8 @@ func TestHandleNodeSelector(t *testing.T) {
 	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	pods := []*v1.Pod{
-		podWithUidNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
-		podWithUidNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
+		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
+		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
 	}
 	// The first pod should be rejected.
 	notfittingPod := pods[0]
@@ -574,8 +574,8 @@ func TestHandleMemExceeded(t *testing.T) {
 		}}},
 	}
 	pods := []*v1.Pod{
-		podWithUidNameNsSpec("123456789", "newpod", "foo", spec),
-		podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
+		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
+		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
 	}
 	// Make sure the Pods are in the reverse order of creation time.
 	pods[1].CreationTimestamp = metav1.NewTime(time.Now())
@@ -768,7 +768,7 @@ func TestCreateMirrorPod(t *testing.T) {

 	kl := testKubelet.kubelet
 	manager := testKubelet.fakeMirrorClient
-	pod := podWithUidNameNs("12345678", "bar", "foo")
+	pod := podWithUIDNameNs("12345678", "bar", "foo")
 	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
 	pods := []*v1.Pod{pod}
 	kl.podManager.SetPods(pods)
@@ -795,7 +795,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {

 	kl := testKubelet.kubelet
 	manager := testKubelet.fakeMirrorClient
-	pod := podWithUidNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "1234", Image: "foo"},
 		},
@@ -803,7 +803,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) {
 	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"

 	// Mirror pod has an outdated spec.
-	mirrorPod := podWithUidNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
+	mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "1234", Image: "bar"},
 		},
@@ -966,7 +966,7 @@ func TestHostNetworkAllowed(t *testing.T) {
 			HostNetworkSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
 		},
 	})
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo"},
 		},
@@ -999,7 +999,7 @@ func TestHostNetworkDisallowed(t *testing.T) {
 			HostNetworkSources: []string{},
 		},
 	})
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo"},
 		},
@@ -1031,7 +1031,7 @@ func TestHostPIDAllowed(t *testing.T) {
 			HostPIDSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
 		},
 	})
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo"},
 		},
@@ -1064,7 +1064,7 @@ func TestHostPIDDisallowed(t *testing.T) {
 			HostPIDSources: []string{},
 		},
 	})
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo"},
 		},
@@ -1096,7 +1096,7 @@ func TestHostIPCAllowed(t *testing.T) {
 			HostIPCSources: []string{kubetypes.ApiserverSource, kubetypes.FileSource},
 		},
 	})
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo"},
 		},
@@ -1129,7 +1129,7 @@ func TestHostIPCDisallowed(t *testing.T) {
 			HostIPCSources: []string{},
 		},
 	})
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo"},
 		},
@@ -1160,7 +1160,7 @@ func TestPrivilegeContainerAllowed(t *testing.T) {
 		AllowPrivileged: true,
 	})
 	privileged := true
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo", SecurityContext: &v1.SecurityContext{Privileged: &privileged}},
 		},
@@ -1188,7 +1188,7 @@ func TestPrivilegedContainerDisallowed(t *testing.T) {
 		AllowPrivileged: false,
 	})
 	privileged := true
-	pod := podWithUidNameNsSpec("12345678", "foo", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
 		Containers: []v1.Container{
 			{Name: "foo", SecurityContext: &v1.SecurityContext{Privileged: &privileged}},
 		},
@@ -1218,7 +1218,7 @@ func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
 		},
 	})

-	pod := podWithUidNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
 		HostNetwork: false,

 		Containers: []v1.Container{
@@ -1372,7 +1372,7 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
 	assert.NotEqual(t, v1.PodFailed, status.Phase)
 }

-func podWithUidNameNs(uid types.UID, name, namespace string) *v1.Pod {
+func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			UID: uid,
@@ -1383,8 +1383,8 @@ func podWithUidNameNs(uid types.UID, name, namespace string) *v1.Pod {
 	}
 }

-func podWithUidNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
-	pod := podWithUidNameNs(uid, name, namespace)
+func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
+	pod := podWithUIDNameNs(uid, name, namespace)
 	pod.Spec = spec
 	return pod
 }
@@ -1399,8 +1399,8 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) {
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	kl := testKubelet.kubelet
 	pods := []*v1.Pod{
-		podWithUidNameNs("12345678", "pod1", "ns"),
-		podWithUidNameNs("12345679", "pod2", "ns"),
+		podWithUIDNameNs("12345678", "pod1", "ns"),
+		podWithUIDNameNs("12345679", "pod2", "ns"),
 	}

 	kl.podManager.SetPods(pods)
@@ -1439,9 +1439,9 @@ func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	kl := testKubelet.kubelet
 	pods := []*v1.Pod{
-		podWithUidNameNs("12345678", "pod1", "ns"),
-		podWithUidNameNs("12345679", "pod2", "ns"),
-		podWithUidNameNs("12345680", "pod3", "ns"),
+		podWithUIDNameNs("12345678", "pod1", "ns"),
+		podWithUIDNameNs("12345679", "pod2", "ns"),
+		podWithUIDNameNs("12345680", "pod3", "ns"),
 	}

 	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
@@ -1465,7 +1465,7 @@ func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
 		Name: "pod1",
 		Namespace: "ns",
 	}
-	apiPod := podWithUidNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
+	apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)

 	// Sync once to create pod directory; confirm that the pod directory has
 	// already been created.
@@ -1544,7 +1544,7 @@ func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
 		}
 		specContainerList = append(specContainerList, v1.Container{Name: containerName})
 	}
-	pod := podWithUidNameNs("uid1", "foo", "test")
+	pod := podWithUIDNameNs("uid1", "foo", "test")
 	pod.Spec = v1.PodSpec{
 		Containers: specContainerList,
 	}
@@ -1587,7 +1587,7 @@ func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
 	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	kubelet := testKubelet.kubelet
-	pod := podWithUidNameNs("12345678", "foo", "new")
+	pod := podWithUIDNameNs("12345678", "foo", "new")
 	pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}

 	podStatus := &kubecontainer.PodStatus{
@@ -1777,7 +1777,7 @@ func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
 	testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 	kubelet := testKubelet.kubelet
-	pod := podWithUidNameNs("12345678", "foo", "new")
+	pod := podWithUIDNameNs("12345678", "foo", "new")
 	containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
 	podStatus := &kubecontainer.PodStatus{
 		ID: pod.UID,
@@ -113,7 +113,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
 			continue
 		}
 		if len(volumePaths) > 0 {
-			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk.", uid))
+			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
 			continue
 		}
 		glog.V(3).Infof("Orphaned pod %q found, removing", uid)
@@ -36,7 +36,7 @@ func TestListVolumesForPod(t *testing.T) {
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet

-	pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 		Volumes: []v1.Volume{
 			{
 				Name: "vol1",
@@ -163,7 +163,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet

-	pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 		Volumes: []v1.Volume{
 			{
 				Name: "vol1",
@@ -209,7 +209,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet

-	pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 		Volumes: []v1.Volume{
 			{
 				Name: "vol1",
@@ -298,7 +298,7 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
 		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
 	})

-	pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 		Volumes: []v1.Volume{
 			{
 				Name: "vol1",
@@ -367,7 +367,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
 		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
 	})

-	pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
+	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
 		Volumes: []v1.Volume{
 			{
 				Name: "vol1",
@@ -69,23 +69,27 @@ func (c *criNetworkHost) GetNetNS(containerID string) (string, error) {
 	return c.kubelet.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "", ID: containerID})
 }

-// noOpLegacyHost implements the network.LegacyHost interface for the remote
+// NoOpLegacyHost implements the network.LegacyHost interface for the remote
 // runtime shim by just returning empties. It doesn't support legacy features
 // like host port and bandwidth shaping.
 type NoOpLegacyHost struct{}

+// GetPodByName always returns "nil, true" for 'NoOpLegacyHost'
 func (n *NoOpLegacyHost) GetPodByName(namespace, name string) (*v1.Pod, bool) {
 	return nil, true
 }

+// GetKubeClient always returns "nil" for 'NoOpLegacyHost'
 func (n *NoOpLegacyHost) GetKubeClient() clientset.Interface {
 	return nil
 }

+// GetRuntime always returns "nil" for 'NoOpLegacyHost'
 func (n *NoOpLegacyHost) GetRuntime() kubecontainer.Runtime {
 	return nil
 }

-func (nh *NoOpLegacyHost) SupportsLegacyFeatures() bool {
+// SupportsLegacyFeatures always returns "false" for 'NoOpLegacyHost'
+func (n *NoOpLegacyHost) SupportsLegacyFeatures() bool {
 	return false
 }
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 )

+// OOMWatcher defines the interface of OOM watchers.
 type OOMWatcher interface {
 	Start(ref *v1.ObjectReference) error
 }
@@ -36,6 +37,7 @@ type realOOMWatcher struct {
 	recorder record.EventRecorder
 }

+// NewOOMWatcher creates and initializes a OOMWatcher based on parameters.
 func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) OOMWatcher {
 	return &realOOMWatcher{
 		cadvisor: cadvisor,
@@ -59,8 +59,8 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
 }

 // getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name inferred from filterContainerId (if not empty), ordered by the creation time from the latest to the earliest.
-// If filterContainerId is empty, all dead containers in the pod are returned.
-func getContainersToDeleteInPod(filterContainerId string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
+// If filterContainerID is empty, all dead containers in the pod are returned.
+func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
 	matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.ContainerStatus {
 		if filterContainerId == "" {
 			return nil
@@ -71,10 +71,10 @@ func getContainersToDeleteInPod(filterContainerId string, podStatus *kubecontain
 			}
 		}
 		return nil
-	}(filterContainerId, podStatus)
+	}(filterContainerID, podStatus)

-	if filterContainerId != "" && matchedContainer == nil {
-		glog.Warningf("Container %q not found in pod's containers", filterContainerId)
+	if filterContainerID != "" && matchedContainer == nil {
+		glog.Warningf("Container %q not found in pod's containers", filterContainerID)
 		return containerStatusbyCreatedList{}
 	}

@@ -97,13 +97,13 @@ func getContainersToDeleteInPod(filterContainerId string, podStatus *kubecontain
 }

 // deleteContainersInPod issues container deletion requests for containers selected by getContainersToDeleteInPod.
-func (p *podContainerDeletor) deleteContainersInPod(filterContainerId string, podStatus *kubecontainer.PodStatus, removeAll bool) {
+func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, removeAll bool) {
 	containersToKeep := p.containersToKeep
 	if removeAll {
 		containersToKeep = 0
 	}

-	for _, candidate := range getContainersToDeleteInPod(filterContainerId, podStatus, containersToKeep) {
+	for _, candidate := range getContainersToDeleteInPod(filterContainerID, podStatus, containersToKeep) {
 		select {
 		case p.worker <- candidate.ID:
 		default:
@@ -185,7 +185,7 @@ func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
 	}

 	testCases := []struct {
-		filterId string
+		filterID string
 		expectedContainersToDelete containerStatusbyCreatedList
 	}{
 		{
@@ -195,7 +195,7 @@ func TestGetContainersToDeleteInPodWithNoMatch(t *testing.T) {
 	}

 	for _, test := range testCases {
-		candidates := getContainersToDeleteInPod(test.filterId, &pod, len(pod.ContainerStatuses))
+		candidates := getContainersToDeleteInPod(test.filterID, &pod, len(pod.ContainerStatuses))
 		if !reflect.DeepEqual(candidates, test.expectedContainersToDelete) {
 			t.Errorf("expected %v got %v", test.expectedContainersToDelete, candidates)
 		}
@@ -292,12 +292,12 @@ func TestFakePodWorkers(t *testing.T) {
 			&v1.Pod{},
 		},
 		{
-			podWithUidNameNs("12345678", "foo", "new"),
-			podWithUidNameNs("12345678", "fooMirror", "new"),
+			podWithUIDNameNs("12345678", "foo", "new"),
+			podWithUIDNameNs("12345678", "fooMirror", "new"),
 		},
 		{
-			podWithUidNameNs("98765", "bar", "new"),
-			podWithUidNameNs("98765", "barMirror", "new"),
+			podWithUIDNameNs("98765", "bar", "new"),
+			podWithUIDNameNs("98765", "barMirror", "new"),
 		},
 	}

@@ -40,10 +40,10 @@ type ReasonCache struct {
 	cache *lru.Cache
 }

-// reasonInfo is the cached item in ReasonCache
-type reasonInfo struct {
-	reason error
-	message string
+// Reason is the cached item in ReasonCache
+type reasonItem struct {
+	Err error
+	Message string
 }

 // maxReasonCacheEntries is the cache entry number in lru cache. 1000 is a proper number
@@ -51,6 +51,7 @@ type reasonInfo struct {
 // may want to increase the number.
 const maxReasonCacheEntries = 1000

+// NewReasonCache creates an instance of 'ReasonCache'.
 func NewReasonCache() *ReasonCache {
 	return &ReasonCache{cache: lru.New(maxReasonCacheEntries)}
 }
@@ -63,7 +64,7 @@ func (c *ReasonCache) composeKey(uid types.UID, name string) string {
 func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
-	c.cache.Add(c.composeKey(uid, name), reasonInfo{reason, message})
+	c.cache.Add(c.composeKey(uid, name), reasonItem{reason, message})
 }

 // Update updates the reason cache with the SyncPodResult. Only SyncResult with
@@ -92,13 +93,13 @@ func (c *ReasonCache) Remove(uid types.UID, name string) {
 // Get gets error reason from the cache. The return values are error reason, error message and
 // whether an error reason is found in the cache. If no error reason is found, empty string will
 // be returned for error reason and error message.
-func (c *ReasonCache) Get(uid types.UID, name string) (error, string, bool) {
+func (c *ReasonCache) Get(uid types.UID, name string) (*reasonItem, bool) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 	value, ok := c.cache.Get(c.composeKey(uid, name))
 	if !ok {
-		return nil, "", ok
+		return nil, false
 	}
-	info := value.(reasonInfo)
-	return info.reason, info.message, ok
+	info := value.(reasonItem)
+	return &info, true
 }
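The Get signature now returns a single *reasonItem plus an ok flag instead of (error, string, bool); the call sites in kubelet_pods.go and reason_cache_test.go elsewhere in this diff are updated to match. A small stand-alone analogue of the new shape (reasonItem here is redeclared locally just for the sketch):

```go
package main

import (
	"errors"
	"fmt"
)

// reasonItem mirrors the struct introduced in reason_cache.go: the error and
// its message now travel together.
type reasonItem struct {
	Err     error
	Message string
}

// get stands in for (*ReasonCache).Get after this change.
func get(found bool) (*reasonItem, bool) {
	if !found {
		return nil, false
	}
	return &reasonItem{Err: errors.New("CrashLoopBackOff"), Message: "back-off restarting failed container"}, true
}

func main() {
	if reason, ok := get(true); ok {
		fmt.Printf("Reason: %s, Message: %s\n", reason.Err.Error(), reason.Message)
	}
}
```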
@@ -51,9 +51,9 @@ func TestReasonCache(t *testing.T) {

 func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *kubecontainer.SyncResult, found bool) {
 	name := result.Target.(string)
-	actualReason, actualMessage, ok := cache.Get(uid, name)
+	actualReason, ok := cache.Get(uid, name)
 	if ok && !found {
-		t.Fatalf("unexpected cache hit: %v, %q", actualReason, actualMessage)
+		t.Fatalf("unexpected cache hit: %v, %q", actualReason.Err, actualReason.Message)
 	}
 	if !ok && found {
 		t.Fatalf("corresponding reason info not found")
@@ -63,7 +63,7 @@ func assertReasonInfo(t *testing.T, cache *ReasonCache, uid types.UID, result *k
 	}
 	reason := result.Error
 	message := result.Message
-	if actualReason != reason || actualMessage != message {
-		t.Errorf("expected %v %q, got %v %q", reason, message, actualReason, actualMessage)
+	if actualReason.Err != reason || actualReason.Message != message {
+		t.Errorf("expected %v %q, got %v %q", reason, message, actualReason.Err, actualReason.Message)
 	}
 }
@@ -35,6 +35,7 @@ const (
 	runOnceRetryDelayBackoff = 2
 )

+// RunPodResult defines the running results of a Pod.
 type RunPodResult struct {
 	Pod *v1.Pod
 	Err error
@@ -44,7 +44,7 @@ import (
 type HollowKubelet struct {
 	KubeletFlags *options.KubeletFlags
 	KubeletConfiguration *componentconfig.KubeletConfiguration
-	KubeletDeps *kubelet.KubeletDeps
+	KubeletDeps *kubelet.Dependencies
 }

 func NewHollowKubelet(
@@ -66,7 +66,7 @@ func NewHollowKubelet(
 	// -----------------
 	volumePlugins := empty_dir.ProbeVolumePlugins()
 	volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...)
-	d := &kubelet.KubeletDeps{
+	d := &kubelet.Dependencies{
 		KubeClient: client,
 		DockerClient: dockerClient,
 		CAdvisorInterface: cadvisorInterface,