mirror of https://github.com/k3s-io/k3s
kubeadm: set pod-infra-container-image for the kubelet
The kubelet allows you to set `--pod-infra-container-image` (also called `PodSandboxImage` in the kubelet config), which can point to a custom location of the "pause" image when Docker is the container runtime. Other CRIs are not supported. Set the CLI flag for the Docker case in flags.go using WriteKubeletDynamicEnvFile().
parent 9199025b24
commit 9a37f2d878
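In practice the change flows end to end like this: kubeadm composes the pause image from the cluster's image repository (GetPauseImage in the diff below) and, for the Docker case, writes it into the kubelet's dynamic environment file as --pod-infra-container-image. The stand-alone sketch below mirrors that flow in plain Go; the repository "k8s.gcr.io" and tag "3.1" are assumed example defaults, and the helper is an illustration, not the kubeadm code itself.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// pauseImage mirrors what GetPauseImage does in the diff below:
// compose "<repository>/pause:<tag>".
func pauseImage(imageRepository, pauseVersion string) string {
	return fmt.Sprintf("%s/pause:%s", imageRepository, pauseVersion)
}

func main() {
	// A couple of the flags kubeadm collects for a Docker (dockershim) node; simplified.
	// "k8s.gcr.io" and "3.1" stand in for the defaults and are assumptions here.
	flags := map[string]string{
		"network-plugin":            "cni",
		"pod-infra-container-image": pauseImage("k8s.gcr.io", "3.1"),
	}

	// Render "--key=value" pairs in a stable order.
	keys := make([]string, 0, len(flags))
	for k := range flags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	args := make([]string, 0, len(keys))
	for _, k := range keys {
		args = append(args, fmt.Sprintf("--%s=%s", k, flags[k]))
	}

	// Roughly what ends up in the kubelet's dynamic env file:
	// KUBELET_KUBEADM_ARGS=--network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.1
	fmt.Printf("KUBELET_KUBEADM_ARGS=%s\n", strings.Join(args, " "))
}

The kubelet's systemd drop-in sources that file and passes $KUBELET_KUBEADM_ARGS along, so the pause image override reaches the kubelet on its next start.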
@@ -370,7 +370,7 @@ func (j *Join) Run(out io.Writer) error {
 	// if the node is hosting a new control plane instance, since it uses static pods for the control plane,
 	// as soon as the kubelet starts it will take charge of creating control plane
 	// components on the node.
-	if err := j.BootstrapKubelet(j.tlsBootstrapCfg); err != nil {
+	if err := j.BootstrapKubelet(); err != nil {
 		return err
 	}
 
@@ -462,19 +462,19 @@ func (j *Join) PrepareForHostingControlPlane(initConfiguration *kubeadmapi.InitC
 // BootstrapKubelet executes the kubelet TLS bootstrap process.
 // This process is executed by the kubelet and completes with the node joining the cluster
 // with a dedicates set of credentials as required by the node authorizer
-func (j *Join) BootstrapKubelet(tlsBootstrapCfg *clientcmdapi.Config) error {
+func (j *Join) BootstrapKubelet() error {
 	bootstrapKubeConfigFile := kubeadmconstants.GetBootstrapKubeletKubeConfigPath()
 
 	// Write the bootstrap kubelet config file or the TLS-Boostrapped kubelet config file down to disk
 	klog.V(1).Infoln("[join] writing bootstrap kubelet config file at", bootstrapKubeConfigFile)
-	if err := kubeconfigutil.WriteToDisk(bootstrapKubeConfigFile, tlsBootstrapCfg); err != nil {
+	if err := kubeconfigutil.WriteToDisk(bootstrapKubeConfigFile, j.tlsBootstrapCfg); err != nil {
 		return errors.Wrap(err, "couldn't save bootstrap-kubelet.conf to disk")
 	}
 
 	// Write the ca certificate to disk so kubelet can use it for authentication
-	cluster := tlsBootstrapCfg.Contexts[tlsBootstrapCfg.CurrentContext].Cluster
+	cluster := j.tlsBootstrapCfg.Contexts[j.tlsBootstrapCfg.CurrentContext].Cluster
 	if _, err := os.Stat(j.cfg.CACertPath); os.IsNotExist(err) {
-		if err := certutil.WriteCert(j.cfg.CACertPath, tlsBootstrapCfg.Clusters[cluster].CertificateAuthorityData); err != nil {
+		if err := certutil.WriteCert(j.cfg.CACertPath, j.tlsBootstrapCfg.Clusters[cluster].CertificateAuthorityData); err != nil {
 			return errors.Wrap(err, "couldn't save the CA certificate to disk")
 		}
 	}
@@ -502,12 +502,8 @@ func (j *Join) BootstrapKubelet(tlsBootstrapCfg *clientcmdapi.Config) error {
 	// Write env file with flags for the kubelet to use. We only want to
 	// register the joining node with the specified taints if the node
 	// is not a master. The markmaster phase will register the taints otherwise.
-	registerTaintsUsingFlags := false
-	if j.cfg.ControlPlane == nil {
-		registerTaintsUsingFlags = true
-	}
-
-	if err := kubeletphase.WriteKubeletDynamicEnvFile(&j.cfg.NodeRegistration, j.initCfg.FeatureGates, registerTaintsUsingFlags, kubeadmconstants.KubeletRunDirectory); err != nil {
+	registerTaintsUsingFlags := j.cfg.ControlPlane == nil
+	if err := kubeletphase.WriteKubeletDynamicEnvFile(j.initCfg, registerTaintsUsingFlags, kubeadmconstants.KubeletRunDirectory); err != nil {
 		return err
 	}
 

@@ -75,7 +75,7 @@ func runKubeletStart(c workflow.RunData) error {
 	// Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
 	// as we handle that ourselves in the markmaster phase
 	// TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
-	if err := kubeletphase.WriteKubeletDynamicEnvFile(&data.Cfg().NodeRegistration, data.Cfg().FeatureGates, false, data.KubeletDir()); err != nil {
+	if err := kubeletphase.WriteKubeletDynamicEnvFile(data.Cfg(), false, data.KubeletDir()); err != nil {
 		return errors.Wrap(err, "error writing a dynamic environment file for the kubelet")
 	}
 

@@ -27,10 +27,9 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
-	"k8s.io/klog"
-
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
 	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"

@@ -80,6 +80,11 @@ func GetEtcdImage(cfg *kubeadmapi.ClusterConfiguration) string {
 	return GetGenericImage(etcdImageRepository, constants.Etcd, etcdImageTag)
 }
 
+// GetPauseImage returns the image for the "pause" container
+func GetPauseImage(cfg *kubeadmapi.ClusterConfiguration) string {
+	return GetGenericImage(cfg.ImageRepository, "pause", constants.PauseVersion)
+}
+
 // GetAllImages returns a list of container images kubeadm expects to use on a control plane node
 func GetAllImages(cfg *kubeadmapi.ClusterConfiguration) []string {
 	imgs := []string{}
@@ -95,7 +100,7 @@ func GetAllImages(cfg *kubeadmapi.ClusterConfiguration) []string {
 	}
 
 	// pause is not available on the ci image repository so use the default image repository.
-	imgs = append(imgs, GetGenericImage(cfg.ImageRepository, "pause", constants.PauseVersion))
+	imgs = append(imgs, GetPauseImage(cfg))
 
 	// if etcd is not external then add the image as it will be required
 	if cfg.Etcd.Local != nil {

@@ -158,6 +158,34 @@ func TestGetEtcdImage(t *testing.T) {
 	}
 }
 
+func TestGetPauseImage(t *testing.T) {
+	testcases := []struct {
+		name     string
+		cfg      *kubeadmapi.ClusterConfiguration
+		expected string
+	}{
+		{
+			name: "pause image defined",
+			cfg: &kubeadmapi.ClusterConfiguration{
+				ImageRepository: "test.repo",
+			},
+			expected: "test.repo/pause:" + constants.PauseVersion,
+		},
+	}
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			actual := GetPauseImage(tc.cfg)
+			if actual != tc.expected {
+				t.Fatalf(
+					"failed GetPauseImage:\n\texpected: %s\n\t actual: %s",
+					tc.expected,
+					actual,
+				)
+			}
+		})
+	}
+}
+
 func TestGetAllImages(t *testing.T) {
 	testcases := []struct {
 		name string

@@ -20,8 +20,6 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
-	"k8s.io/klog"
-
 	"k8s.io/api/core/v1"
 	rbac "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -30,6 +28,7 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
 	rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1"
 )

@@ -15,6 +15,7 @@ go_library(
         "//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
         "//cmd/kubeadm/app/componentconfigs:go_default_library",
         "//cmd/kubeadm/app/constants:go_default_library",
+        "//cmd/kubeadm/app/images:go_default_library",
         "//cmd/kubeadm/app/util:go_default_library",
         "//cmd/kubeadm/app/util/apiclient:go_default_library",
         "//pkg/apis/rbac/v1:go_default_library",

@@ -23,7 +23,6 @@ import (
 	"path/filepath"
 
 	"github.com/pkg/errors"
-
 	"k8s.io/api/core/v1"
 	rbac "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -25,10 +25,10 @@ import (
 
 	"github.com/pkg/errors"
 	"k8s.io/klog"
-
 	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 	kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
 	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
+	"k8s.io/kubernetes/cmd/kubeadm/app/images"
 	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
 	nodeutil "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/util/procfs"
@@ -38,6 +38,7 @@ import (
 type kubeletFlagsOpts struct {
 	nodeRegOpts              *kubeadmapi.NodeRegistrationOptions
 	featureGates             map[string]bool
+	pauseImage               string
 	registerTaintsUsingFlags bool
 	execer                   utilsexec.Interface
 	pidOfFunc                func(string) ([]int, error)
@@ -46,22 +47,23 @@ type kubeletFlagsOpts struct {
 
 // WriteKubeletDynamicEnvFile writes an environment file with dynamic flags to the kubelet.
 // Used at "kubeadm init" and "kubeadm join" time.
-func WriteKubeletDynamicEnvFile(nodeRegOpts *kubeadmapi.NodeRegistrationOptions, featureGates map[string]bool, registerTaintsUsingFlags bool, kubeletDir string) error {
+func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.InitConfiguration, registerTaintsUsingFlags bool, kubeletDir string) error {
 	hostName, err := nodeutil.GetHostname("")
 	if err != nil {
 		return err
 	}
 
 	flagOpts := kubeletFlagsOpts{
-		nodeRegOpts:              nodeRegOpts,
-		featureGates:             featureGates,
+		nodeRegOpts:              &cfg.NodeRegistration,
+		featureGates:             cfg.FeatureGates,
+		pauseImage:               images.GetPauseImage(&cfg.ClusterConfiguration),
 		registerTaintsUsingFlags: registerTaintsUsingFlags,
 		execer:                   utilsexec.New(),
 		pidOfFunc:                procfs.PidOf,
 		defaultHostname:          hostName,
 	}
 	stringMap := buildKubeletArgMap(flagOpts)
-	argList := kubeadmutil.BuildArgumentListFromMap(stringMap, nodeRegOpts.KubeletExtraArgs)
+	argList := kubeadmutil.BuildArgumentListFromMap(stringMap, cfg.NodeRegistration.KubeletExtraArgs)
 	envFileContent := fmt.Sprintf("%s=%s\n", constants.KubeletEnvFileVariableName, strings.Join(argList, " "))
 
 	return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir)
@@ -81,6 +83,9 @@ func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string {
 		} else {
 			kubeletFlags["cgroup-driver"] = driver
 		}
+		if opts.pauseImage != "" {
+			kubeletFlags["pod-infra-container-image"] = opts.pauseImage
+		}
 	} else {
 		kubeletFlags["container-runtime"] = "remote"
 		kubeletFlags["container-runtime-endpoint"] = opts.nodeRegOpts.CRISocket

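Note that the flag is gated on the Docker case only: in buildKubeletArgMap above, --pod-infra-container-image is added only when the node's CRI socket is the dockershim socket and a pause image was resolved; for any other CRI the kubelet is pointed at the remote runtime instead and the flag is omitted. A reduced, stand-alone illustration of that branch follows; the socket paths are example values, and this is not the kubeadm function itself.

package main

import "fmt"

// criFlags is an illustrative reduction of buildKubeletArgMap from the diff
// above: the pause image flag is emitted only for the dockershim socket.
func criFlags(criSocket, pauseImage string) map[string]string {
	flags := map[string]string{}
	if criSocket == "/var/run/dockershim.sock" { // Docker via dockershim
		flags["network-plugin"] = "cni"
		if pauseImage != "" {
			flags["pod-infra-container-image"] = pauseImage
		}
	} else { // any other CRI: talk to it remotely, no pause-image override
		flags["container-runtime"] = "remote"
		flags["container-runtime-endpoint"] = criSocket
	}
	return flags
}

func main() {
	fmt.Println(criFlags("/var/run/dockershim.sock", "gcr.io/pause:3.1"))
	fmt.Println(criFlags("/run/containerd/containerd.sock", "gcr.io/pause:3.1"))
}

The new test case below exercises exactly this path, with CRISocket set to /var/run/dockershim.sock.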
@@ -231,6 +231,24 @@ func TestBuildKubeletArgMap(t *testing.T) {
 				"resolv-conf": "/run/systemd/resolve/resolv.conf",
 			},
 		},
+		{
+			name: "pause image is set",
+			opts: kubeletFlagsOpts{
+				nodeRegOpts: &kubeadmapi.NodeRegistrationOptions{
+					CRISocket: "/var/run/dockershim.sock",
+					Name:      "foo",
+				},
+				pauseImage:      "gcr.io/pause:3.1",
+				execer:          cgroupfsCgroupExecer,
+				pidOfFunc:       binaryNotRunningPidOfFunc,
+				defaultHostname: "foo",
+			},
+			expected: map[string]string{
+				"network-plugin":            "cni",
+				"cgroup-driver":             "cgroupfs",
+				"pod-infra-container-image": "gcr.io/pause:3.1",
+			},
+		},
 	}
 
 	for _, test := range tests {

@@ -209,7 +209,7 @@ func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitCon
 	// Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
 	// as we handle that ourselves in the markmaster phase
 	// TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
-	if err := kubeletphase.WriteKubeletDynamicEnvFile(&cfg.NodeRegistration, cfg.FeatureGates, false, kubeletDir); err != nil {
+	if err := kubeletphase.WriteKubeletDynamicEnvFile(cfg, false, kubeletDir); err != nil {
 		errs = append(errs, pkgerrors.Wrap(err, "error writing a dynamic environment file for the kubelet"))
 	}
 

@@ -25,9 +25,7 @@ import (
 	"strings"
 
 	"github.com/pkg/errors"
-
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/version"
@@ -93,7 +91,7 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte
 	// Also, the config map really should be KubeadmConfigConfigMap...
 	configMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.KubeadmConfigConfigMap, metav1.GetOptions{})
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to get config map")
 	}
 
 	// InitConfiguration is composed with data from different places
@@ -105,12 +103,12 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte
 		return nil, errors.Errorf("unexpected error when reading kubeadm-config ConfigMap: %s key value pair missing", constants.ClusterConfigurationConfigMapKey)
 	}
 	if err := runtime.DecodeInto(kubeadmscheme.Codecs.UniversalDecoder(), []byte(clusterConfigurationData), &initcfg.ClusterConfiguration); err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to decode cluster configuration data")
 	}
 
 	// gets the component configs from the corresponding config maps
 	if err := getComponentConfigs(client, &initcfg.ClusterConfiguration); err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to get component configs")
 	}
 
 	// if this isn't a new controlplane instance (e.g. in case of kubeadm upgrades)
@@ -118,11 +116,11 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte
 	if !newControlPlane {
 		// gets the nodeRegistration for the current from the node object
 		if err := getNodeRegistration(kubeconfigDir, client, &initcfg.NodeRegistration); err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, "failed to get node registration")
 		}
 		// gets the APIEndpoint for the current node from then ClusterStatus in the kubeadm-config ConfigMap
 		if err := getAPIEndpoint(configMap.Data, initcfg.NodeRegistration.Name, &initcfg.LocalAPIEndpoint); err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, "failed to getAPIEndpoint")
 		}
 	}
 
@@ -134,13 +132,13 @@ func getNodeRegistration(kubeconfigDir string, client clientset.Interface, nodeR
 	// gets the name of the current node
 	nodeName, err := getNodeNameFromKubeletConfig(kubeconfigDir)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "failed to get node name from kubelet config")
 	}
 
 	// gets the corresponding node and retrives attributes stored there.
 	node, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 	if err != nil {
-		return err
+		return errors.Wrap(err, "faild to get corresponding node")
 	}
 
 	criSocket, ok := node.ObjectMeta.Annotations[constants.AnnotationKubeadmCRISocket]