diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go
index 472a824246..5b5b68d6d1 100644
--- a/cmd/kubelet/app/server.go
+++ b/cmd/kubelet/app/server.go
@@ -147,7 +147,7 @@ func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error
 	}
 
 	var dockerClient libdocker.Interface
-	if s.ContainerRuntime == "docker" {
+	if s.ContainerRuntime == kubetypes.DockerContainerRuntime {
 		dockerClient = libdocker.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration,
 			s.ImagePullProgressDeadline.Duration)
 	} else {
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 2debf6229e..5294cd519a 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -564,7 +564,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 	pluginSettings.LegacyRuntimeHost = nl
 
 	// rktnetes cannot be run with CRI.
-	if kubeCfg.ContainerRuntime != "rkt" {
+	if kubeCfg.ContainerRuntime != kubetypes.RktContainerRuntime {
 		// kubelet defers to the runtime shim to setup networking. Setting
 		// this to nil will prevent it from trying to invoke the plugin.
 		// It's easier to always probe and initialize plugins till cri
@@ -572,7 +572,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 		klet.networkPlugin = nil
 
 		switch kubeCfg.ContainerRuntime {
-		case "docker":
+		case kubetypes.DockerContainerRuntime:
 			// Create and start the CRI shim running as a grpc server.
 			streamingConfig := getStreamingConfig(kubeCfg, kubeDeps)
 			ds, err := dockershim.NewDockerService(kubeDeps.DockerClient, kubeCfg.SeccompProfileRoot, crOptions.PodSandboxImage,
@@ -606,7 +606,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 			if !supported {
 				klet.dockerLegacyService = dockershim.NewDockerLegacyService(kubeDeps.DockerClient)
 			}
-		case "remote":
+		case kubetypes.RemoteContainerRuntime:
 			// No-op.
 			break
 		default:
@@ -814,7 +814,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 	klet.appArmorValidator = apparmor.NewValidator(kubeCfg.ContainerRuntime)
 	klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
 	if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) {
-		if kubeCfg.ContainerRuntime == "docker" {
+		if kubeCfg.ContainerRuntime == kubetypes.DockerContainerRuntime {
 			if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClient); err != nil {
 				return nil, err
 			}
@@ -2122,7 +2122,7 @@ func (kl *Kubelet) updateRuntimeUp() {
 	}
 	// rkt uses the legacy, non-CRI integration. Don't check the runtime
 	// conditions for it.
-	if kl.kubeletConfiguration.ContainerRuntime != "rkt" {
+	if kl.kubeletConfiguration.ContainerRuntime != kubetypes.RktContainerRuntime {
 		if s == nil {
 			glog.Errorf("Container runtime status is nil")
 			return
diff --git a/pkg/kubelet/types/constants.go b/pkg/kubelet/types/constants.go
index eeabba0174..65f17c4a7a 100644
--- a/pkg/kubelet/types/constants.go
+++ b/pkg/kubelet/types/constants.go
@@ -19,4 +19,9 @@ package types
 const (
 	// system default DNS resolver configuration
 	ResolvConfDefault = "/etc/resolv.conf"
+
+	// different container runtimes
+	DockerContainerRuntime = "docker"
+	RktContainerRuntime    = "rkt"
+	RemoteContainerRuntime = "remote"
 )
diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go
index 88da1ffcf6..a37589ecff 100644
--- a/pkg/kubemark/hollow_kubelet.go
+++ b/pkg/kubemark/hollow_kubelet.go
@@ -144,7 +144,7 @@ func GetHollowKubeletConfig(
 	c.LowDiskSpaceThresholdMB = 256
 	c.VolumeStatsAggPeriod.Duration = time.Minute
 	c.CgroupRoot = ""
-	c.ContainerRuntime = "docker"
+	c.ContainerRuntime = kubetypes.DockerContainerRuntime
 	c.CPUCFSQuota = true
 	c.RuntimeCgroups = ""
 	c.EnableControllerAttachDetach = false