mirror of https://github.com/k3s-io/k3s
Merge pull request #72111 from rosti/reduce-initcfg
kubeadm: Reduce the usage of InitConfiguration (pull/564/head)
commit b8b689aae0
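The theme of every hunk below is the same: helpers that used to take the whole kubeadm InitConfiguration now take only the embedded ClusterConfiguration (plus an explicit APIEndpoint or node name where one is genuinely needed), and callers pass &cfg.ClusterConfiguration instead of cfg. The following is a minimal, self-contained Go sketch of that pattern using simplified stand-in types, not the real kubeadm API; it is for orientation only.

package main

import "fmt"

// Simplified stand-ins for the kubeadm API types (illustrative only).
type ClusterConfiguration struct {
	KubernetesVersion string
	CertificatesDir   string
}

type APIEndpoint struct {
	AdvertiseAddress string
	BindPort         int32
}

type InitConfiguration struct {
	ClusterConfiguration // embedded, as in the real API
	LocalAPIEndpoint     APIEndpoint
}

// Before the change a helper like this would have taken *InitConfiguration even
// though it only needs cluster-wide fields; after the change it declares exactly
// what it uses.
func describe(cfg *ClusterConfiguration, endpoint *APIEndpoint) string {
	return fmt.Sprintf("version=%s certs=%s advertise=%s:%d",
		cfg.KubernetesVersion, cfg.CertificatesDir,
		endpoint.AdvertiseAddress, endpoint.BindPort)
}

func main() {
	cfg := &InitConfiguration{
		ClusterConfiguration: ClusterConfiguration{KubernetesVersion: "v1.9.0", CertificatesDir: "/etc/kubernetes/pki"},
		LocalAPIEndpoint:     APIEndpoint{AdvertiseAddress: "1.2.3.4", BindPort: 6443},
	}
	// Callers now hand over only the sub-structs, as the hunks below do.
	fmt.Println(describe(&cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint))
}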
@@ -125,7 +125,7 @@ func getSelfhostingSubCommand(in io.Reader) *cobra.Command {
 // KubernetesVersion is not used, but we set it explicitly to avoid the lookup
 // of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig
-phases.SetKubernetesVersion(cfg)
+phases.SetKubernetesVersion(&cfg.ClusterConfiguration)

 // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags
 internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, cfg)
@@ -388,7 +388,7 @@ func RunConfigView(out io.Writer, client clientset.Interface) error {
 func uploadConfiguration(client clientset.Interface, cfgPath string, defaultcfg *kubeadmapiv1beta1.InitConfiguration) error {
 // KubernetesVersion is not used, but we set it explicitly to avoid the lookup
 // of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig
-phaseutil.SetKubernetesVersion(defaultcfg)
+phaseutil.SetKubernetesVersion(&defaultcfg.ClusterConfiguration)

 // Default both statically and dynamically, convert to internal API type, and validate everything
 // First argument is unset here as we shouldn't load a config file from disk
@@ -333,7 +333,7 @@ func newInitData(cmd *cobra.Command, args []string, options *initOptions, out io
 }

 // Checks if an external CA is provided by the user.
-externalCA, _ := certsphase.UsingExternalCA(cfg)
+externalCA, _ := certsphase.UsingExternalCA(&cfg.ClusterConfiguration)
 if externalCA {
 kubeconfigDir := kubeadmconstants.KubernetesDir
 if options.dryRun {
@@ -466,14 +466,14 @@ func (j *joinData) Run() error {

 // Continue with more specific checks based on the init configuration
 klog.V(1).Infoln("[preflight] Running configuration dependant checks")
-if err := preflight.RunOptionalJoinNodeChecks(utilsexec.New(), initCfg, j.ignorePreflightErrors); err != nil {
+if err := preflight.RunOptionalJoinNodeChecks(utilsexec.New(), &initCfg.ClusterConfiguration, j.ignorePreflightErrors); err != nil {
 return err
 }

 if j.cfg.ControlPlane != nil {
 // Checks if the cluster configuration supports
 // joining a new control plane instance and if all the necessary certificates are provided
-if err := j.CheckIfReadyForAdditionalControlPlane(initCfg); err != nil {
+if err := j.CheckIfReadyForAdditionalControlPlane(&initCfg.ClusterConfiguration); err != nil {
 // outputs the not ready for hosting a new control plane instance message
 ctx := map[string]string{
 "Error": err.Error(),
@@ -539,14 +539,14 @@ func (j *joinData) Run() error {

 // CheckIfReadyForAdditionalControlPlane ensures that the cluster is in a state that supports
 // joining an additional control plane instance and if the node is ready to join
-func (j *joinData) CheckIfReadyForAdditionalControlPlane(initConfiguration *kubeadmapi.InitConfiguration) error {
+func (j *joinData) CheckIfReadyForAdditionalControlPlane(cfg *kubeadmapi.ClusterConfiguration) error {
 // blocks if the cluster was created without a stable control plane endpoint
-if initConfiguration.ControlPlaneEndpoint == "" {
+if cfg.ControlPlaneEndpoint == "" {
 return errors.New("unable to add a new control plane instance a cluster that doesn't have a stable controlPlaneEndpoint address")
 }

 // checks if the certificates that must be equal across contolplane instances are provided
-if ret, err := certsphase.SharedCertificateExists(initConfiguration); !ret {
+if ret, err := certsphase.SharedCertificateExists(cfg); !ret {
 return err
 }
@@ -586,7 +586,7 @@ func (j *joinData) PrepareForHostingControlPlane(initConfiguration *kubeadmapi.I
 return errors.Wrap(err, "couldn't create Kubernetes client")
 }

-if err := etcdphase.CheckLocalEtcdClusterStatus(client, initConfiguration); err != nil {
+if err := etcdphase.CheckLocalEtcdClusterStatus(client, &initConfiguration.ClusterConfiguration); err != nil {
 return err
 }
 }
@@ -638,7 +638,7 @@ func (j *joinData) BootstrapKubelet(tlsBootstrapCfg *clientcmdapi.Config, initCo
 // register the joining node with the specified taints if the node
 // is not a master. The markmaster phase will register the taints otherwise.
 registerTaintsUsingFlags := j.cfg.ControlPlane == nil
-if err := kubeletphase.WriteKubeletDynamicEnvFile(initConfiguration, registerTaintsUsingFlags, kubeadmconstants.KubeletRunDirectory); err != nil {
+if err := kubeletphase.WriteKubeletDynamicEnvFile(&initConfiguration.ClusterConfiguration, &initConfiguration.NodeRegistration, registerTaintsUsingFlags, kubeadmconstants.KubeletRunDirectory); err != nil {
 return err
 }
@@ -690,7 +690,7 @@ func (j *joinData) PostInstallControlPlane(initConfiguration *kubeadmapi.InitCon
 // because it needs two members as majority to agree on the consensus. You will only see this behavior between the time
 // etcdctl member add informs the cluster about the new member and the new member successfully establishing a connection to the existing one."
 klog.V(1).Info("[join] adding etcd")
-if err := etcdphase.CreateStackedEtcdStaticPodManifestFile(client, kubeadmconstants.GetStaticPodDirectory(), initConfiguration); err != nil {
+if err := etcdphase.CreateStackedEtcdStaticPodManifestFile(client, kubeadmconstants.GetStaticPodDirectory(), initConfiguration.NodeRegistration.Name, &initConfiguration.ClusterConfiguration, &initConfiguration.LocalAPIEndpoint); err != nil {
 return errors.Wrap(err, "error creating local etcd static pod manifest file")
 }
 }
@@ -95,7 +95,7 @@ func runCoreDNSAddon(c workflow.RunData) error {
 if err != nil {
 return err
 }
-return dnsaddon.EnsureDNSAddon(cfg, client)
+return dnsaddon.EnsureDNSAddon(&cfg.ClusterConfiguration, client)
 }

 // runKubeProxyAddon installs KubeProxy addon to a Kubernetes cluster
@@ -104,7 +104,7 @@ func runKubeProxyAddon(c workflow.RunData) error {
 if err != nil {
 return err
 }
-return proxyaddon.EnsureProxyAddon(cfg, client)
+return proxyaddon.EnsureProxyAddon(&cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, client)
 }

 func getAddonPhaseFlags(name string) []string {
@@ -204,13 +204,8 @@ func runCertsSa(c workflow.RunData) error {
 return nil
 }

-// if dryrunning, write certificates to a temporary folder (and defer restore to the path originally specified by the user)
-cfg := data.Cfg()
-cfg.CertificatesDir = data.CertificateWriteDir()
-defer func() { cfg.CertificatesDir = data.CertificateDir() }()
-
 // create the new service account key (or use existing)
-return certsphase.CreateServiceAccountKeyAndPublicKeyFiles(cfg)
+return certsphase.CreateServiceAccountKeyAndPublicKeyFiles(data.CertificateWriteDir())
 }

 func runCerts(c workflow.RunData) error {
@@ -150,6 +150,6 @@ func runControlPlaneSubPhase(component string) func(c workflow.RunData) error {
 cfg := data.Cfg()

 fmt.Printf("[control-plane] Creating static Pod manifest for %q\n", component)
-return controlplane.CreateStaticPodFiles(data.ManifestDir(), cfg, component)
+return controlplane.CreateStaticPodFiles(data.ManifestDir(), &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, component)
 }
 }
@@ -90,7 +90,7 @@ func runEtcdPhaseLocal() func(c workflow.RunData) error {
 // Add etcd static pod spec only if external etcd is not configured
 if cfg.Etcd.External == nil {
 fmt.Printf("[etcd] Creating static Pod manifest for local etcd in %q\n", data.ManifestDir())
-if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(data.ManifestDir(), cfg); err != nil {
+if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(data.ManifestDir(), cfg.NodeRegistration.Name, &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint); err != nil {
 return errors.Wrap(err, "error creating local etcd static pod manifest file")
 }
 } else {
@@ -75,7 +75,7 @@ func runKubeletStart(c workflow.RunData) error {
 // Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
 // as we handle that ourselves in the markmaster phase
 // TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
-if err := kubeletphase.WriteKubeletDynamicEnvFile(data.Cfg(), false, data.KubeletDir()); err != nil {
+if err := kubeletphase.WriteKubeletDynamicEnvFile(&data.Cfg().ClusterConfiguration, &data.Cfg().NodeRegistration, false, data.KubeletDir()); err != nil {
 return errors.Wrap(err, "error writing a dynamic environment file for the kubelet")
 }
@@ -126,7 +126,7 @@ func runUploadKubeletConfig(c workflow.RunData) error {
 }

 klog.V(1).Infof("[upload-config] Uploading the kubelet component config to a ConfigMap")
-if err = kubeletphase.CreateConfigMap(cfg, client); err != nil {
+if err = kubeletphase.CreateConfigMap(cfg.ClusterConfiguration.ComponentConfigs.Kubelet, cfg.KubernetesVersion, client); err != nil {
 return errors.Wrap(err, "error creating kubelet configuration ConfigMap")
 }
@@ -23,7 +23,7 @@ import (

 // SetKubernetesVersion gets the current Kubeadm version and sets it as KubeadmVersion in the config,
 // unless it's already set to a value different from the default.
-func SetKubernetesVersion(cfg *kubeadmapiv1beta1.InitConfiguration) {
+func SetKubernetesVersion(cfg *kubeadmapiv1beta1.ClusterConfiguration) {

 if cfg.KubernetesVersion != kubeadmapiv1beta1.DefaultKubernetesVersion && cfg.KubernetesVersion != "" {
 return
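For context, the guard shown above means SetKubernetesVersion leaves any explicit, non-default KubernetesVersion untouched and only fills the field in otherwise; the real implementation derives the value from the running kubeadm build. A rough, self-contained sketch of that defaulting behavior with stand-in names (not the real kubeadm code):

package main

import "fmt"

// Stand-in for kubeadmapiv1beta1.DefaultKubernetesVersion (hypothetical value).
const defaultKubernetesVersion = "stable-1"

type clusterConfiguration struct {
	KubernetesVersion string
}

// setKubernetesVersion mirrors the guard in the hunk above: keep an explicit,
// non-default value; otherwise fill in a concrete version.
func setKubernetesVersion(cfg *clusterConfiguration, runningKubeadmVersion string) {
	if cfg.KubernetesVersion != defaultKubernetesVersion && cfg.KubernetesVersion != "" {
		return
	}
	cfg.KubernetesVersion = runningKubeadmVersion
}

func main() {
	a := &clusterConfiguration{}                            // empty -> defaulted
	b := &clusterConfiguration{KubernetesVersion: "v1.9.0"} // explicit -> kept
	setKubernetesVersion(a, "v1.13.1")
	setKubernetesVersion(b, "v1.13.1")
	fmt.Println(a.KubernetesVersion, b.KubernetesVersion) // v1.13.1 v1.9.0
}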
@@ -51,7 +51,7 @@ func TestSetKubernetesVersion(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-cfg := &kubeadmapiv1beta1.InitConfiguration{ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{KubernetesVersion: test.input}}
+cfg := &kubeadmapiv1beta1.ClusterConfiguration{KubernetesVersion: test.input}
 SetKubernetesVersion(cfg)
 if cfg.KubernetesVersion != test.output {
 t.Fatalf("expected %q, got %q", test.output, cfg.KubernetesVersion)
@@ -212,7 +212,7 @@ func NewCmdTokenGenerate(out io.Writer) *cobra.Command {
 func RunCreateToken(out io.Writer, client clientset.Interface, cfgPath string, cfg *kubeadmapiv1beta1.InitConfiguration, printJoinCommand bool, kubeConfigFile string) error {
 // KubernetesVersion is not used, but we set it explicitly to avoid the lookup
 // of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig
-phaseutil.SetKubernetesVersion(cfg)
+phaseutil.SetKubernetesVersion(&cfg.ClusterConfiguration)

 // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags
 klog.V(1).Infoln("[token] loading configurations")
@@ -105,7 +105,7 @@ func runDiff(flags *diffFlags, args []string) error {
 return err
 }

-specs := controlplane.GetStaticPodSpecs(cfg, k8sVer)
+specs := controlplane.GetStaticPodSpecs(&cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, k8sVer)
 for spec, pod := range specs {
 var path string
 switch spec {
@@ -74,14 +74,14 @@ func DeployedDNSAddon(client clientset.Interface) (kubeadmapi.DNSAddOnType, stri
 }

 // EnsureDNSAddon creates the kube-dns or CoreDNS addon
-func EnsureDNSAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error {
+func EnsureDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
 if cfg.DNS.Type == kubeadmapi.CoreDNS {
 return coreDNSAddon(cfg, client)
 }
 return kubeDNSAddon(cfg, client)
 }

-func kubeDNSAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error {
+func kubeDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
 if err := CreateServiceAccount(client); err != nil {
 return err
 }
@@ -103,9 +103,9 @@ func kubeDNSAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interface)
 dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment,
 struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
 DeploymentName: kubeadmconstants.KubeDNSDeploymentName,
-KubeDNSImage: images.GetDNSImage(&cfg.ClusterConfiguration, kubeadmconstants.KubeDNSKubeDNSImageName),
-DNSMasqImage: images.GetDNSImage(&cfg.ClusterConfiguration, kubeadmconstants.KubeDNSDnsMasqNannyImageName),
-SidecarImage: images.GetDNSImage(&cfg.ClusterConfiguration, kubeadmconstants.KubeDNSSidecarImageName),
+KubeDNSImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSKubeDNSImageName),
+DNSMasqImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSDnsMasqNannyImageName),
+SidecarImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSSidecarImageName),
 DNSBindAddr: dnsBindAddr,
 DNSProbeAddr: dnsProbeAddr,
 DNSDomain: cfg.Networking.DNSDomain,
@@ -155,11 +155,11 @@ func createKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.I
 return createDNSService(kubednsService, serviceBytes, client)
 }

-func coreDNSAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error {
+func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
 // Get the YAML manifest
 coreDNSDeploymentBytes, err := kubeadmutil.ParseTemplate(CoreDNSDeployment, struct{ DeploymentName, Image, MasterTaintKey string }{
 DeploymentName: kubeadmconstants.CoreDNSDeploymentName,
-Image: images.GetDNSImage(&cfg.ClusterConfiguration, kubeadmconstants.CoreDNSImageName),
+Image: images.GetDNSImage(cfg, kubeadmconstants.CoreDNSImageName),
 MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
 })
 if err != nil {
@@ -47,13 +47,13 @@ const (
 )

 // EnsureProxyAddon creates the kube-proxy addons
-func EnsureProxyAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error {
+func EnsureProxyAddon(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubeadmapi.APIEndpoint, client clientset.Interface) error {
 if err := CreateServiceAccount(client); err != nil {
 return errors.Wrap(err, "error when creating kube-proxy service account")
 }

 // Generate Master Enpoint kubeconfig file
-masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
+masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg.ControlPlaneEndpoint, localEndpoint)
 if err != nil {
 return err
 }
@@ -81,7 +81,7 @@ func EnsureProxyAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interf
 return errors.Wrap(err, "error when parsing kube-proxy configmap template")
 }
 proxyDaemonSetBytes, err = kubeadmutil.ParseTemplate(KubeProxyDaemonSet19, struct{ Image, ProxyConfigMap, ProxyConfigMapKey string }{
-Image: images.GetKubernetesImage(constants.KubeProxy, &cfg.ClusterConfiguration),
+Image: images.GetKubernetesImage(constants.KubeProxy, cfg),
 ProxyConfigMap: constants.KubeProxyConfigMap,
 ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
 })
@@ -223,7 +223,7 @@ func TestEnsureProxyAddon(t *testing.T) {
 t.Errorf("test failed to set dynamic defaults: %v", err)
 break
 }
-err = EnsureProxyAddon(intMaster, client)
+err = EnsureProxyAddon(&intMaster.ClusterConfiguration, &intMaster.LocalAPIEndpoint, client)

 // Compare actual to expected errors
 actErr := "No error"
@@ -60,12 +60,12 @@ func CreatePKIAssets(cfg *kubeadmapi.InitConfiguration) error {
 fmt.Printf("[certs] valid certificates and keys now exist in %q\n", cfg.CertificatesDir)

 // Service accounts are not x509 certs, so handled separately
-return CreateServiceAccountKeyAndPublicKeyFiles(cfg)
+return CreateServiceAccountKeyAndPublicKeyFiles(cfg.CertificatesDir)
 }

 // CreateServiceAccountKeyAndPublicKeyFiles create a new public/private key files for signing service account users.
 // If the sa public/private key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned.
-func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.InitConfiguration) error {
+func CreateServiceAccountKeyAndPublicKeyFiles(certsDir string) error {
 klog.V(1).Infoln("creating a new public/private key files for signing service account users")
 saSigningKey, err := NewServiceAccountSigningKey()
 if err != nil {
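CreateServiceAccountKeyAndPublicKeyFiles is a good illustration of the narrowing: it only ever needed the certificates directory, so it now takes the path directly, and the dry-run caller (see the runCertsSa hunk earlier) can simply pass a different directory instead of mutating and restoring cfg.CertificatesDir. A small, self-contained sketch of that idea with hypothetical helper names (not the real kubeadm implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeKeyFileIfNotExist is a hypothetical stand-in for kubeadm's
// writeKeyFilesIfNotExist: it only ever touches the directory it is given.
func writeKeyFileIfNotExist(certsDir, baseName string, key []byte) error {
	path := filepath.Join(certsDir, baseName+".key")
	if _, err := os.Stat(path); err == nil {
		return nil // an existing key is reused
	}
	return os.WriteFile(path, key, 0600)
}

// createServiceAccountKeyFiles takes just the directory, mirroring the new
// CreateServiceAccountKeyAndPublicKeyFiles(certsDir string) signature.
func createServiceAccountKeyFiles(certsDir string) error {
	key := []byte("dummy-sa-signing-key") // placeholder; the real code generates an RSA key
	return writeKeyFileIfNotExist(certsDir, "sa", key)
}

func main() {
	dir, err := os.MkdirTemp("", "certs")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := createServiceAccountKeyFiles(dir); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("wrote service account key under", dir)
}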
@@ -73,7 +73,7 @@ func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.InitConfiguration)
 }

 return writeKeyFilesIfNotExist(
-cfg.CertificatesDir,
+certsDir,
 kubeadmconstants.ServiceAccountKeyBaseName,
 saSigningKey,
 )
@@ -329,7 +329,7 @@ type certKeyLocation struct {

 // SharedCertificateExists verifies if the shared certificates - the certificates that must be
 // equal across masters: ca.key, ca.crt, sa.key, sa.pub + etcd/ca.key, etcd/ca.crt if local/stacked etcd
-func SharedCertificateExists(cfg *kubeadmapi.InitConfiguration) (bool, error) {
+func SharedCertificateExists(cfg *kubeadmapi.ClusterConfiguration) (bool, error) {

 if err := validateCACertAndKey(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, "", "CA"}); err != nil {
 return false, err
@@ -356,7 +356,7 @@ func SharedCertificateExists(cfg *kubeadmapi.InitConfiguration) (bool, error) {
 // UsingExternalCA determines whether the user is relying on an external CA. We currently implicitly determine this is the case
 // when both the CA Cert and the front proxy CA Cert are present but the CA Key and front proxy CA Key are not.
 // This allows us to, e.g., skip generating certs or not start the csr signing controller.
-func UsingExternalCA(cfg *kubeadmapi.InitConfiguration) (bool, error) {
+func UsingExternalCA(cfg *kubeadmapi.ClusterConfiguration) (bool, error) {

 if err := validateCACert(certKeyLocation{cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName, "", "CA"}); err != nil {
 return false, err
@@ -463,10 +463,8 @@ func TestSharedCertificateExists(t *testing.T) {
 os.MkdirAll(tmpdir+"/etcd", os.ModePerm)
 defer os.RemoveAll(tmpdir)

-cfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-CertificatesDir: tmpdir,
-},
+cfg := &kubeadmapi.ClusterConfiguration{
+CertificatesDir: tmpdir,
 }

 // created expected keys
@@ -554,7 +552,7 @@ func TestUsingExternalCA(t *testing.T) {
 }
 }

-if val, _ := UsingExternalCA(cfg); val != test.expected {
+if val, _ := UsingExternalCA(&cfg.ClusterConfiguration); val != test.expected {
 t.Errorf("UsingExternalCA did not match expected: %v", test.expected)
 }
 }
@@ -41,6 +41,7 @@ go_library(
 "//cmd/kubeadm/app/util/staticpod:go_default_library",
 "//pkg/kubeapiserver/authorizer/modes:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
 "//vendor/github.com/pkg/errors:go_default_library",
@@ -26,6 +26,7 @@ import (

 "github.com/pkg/errors"
 "k8s.io/api/core/v1"
+"k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/apimachinery/pkg/util/version"
 "k8s.io/klog"
 kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@@ -40,12 +41,12 @@ import (
 // CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane.
 func CreateInitStaticPodManifestFiles(manifestDir string, cfg *kubeadmapi.InitConfiguration) error {
 klog.V(1).Infoln("[control-plane] creating static Pod files")
-return CreateStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeScheduler)
+return CreateStaticPodFiles(manifestDir, &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeScheduler)
 }

-// GetStaticPodSpecs returns all staticPodSpecs actualized to the context of the current InitConfiguration
+// GetStaticPodSpecs returns all staticPodSpecs actualized to the context of the current configuration
 // NB. this methods holds the information about how kubeadm creates static pod manifests.
-func GetStaticPodSpecs(cfg *kubeadmapi.InitConfiguration, k8sVersion *version.Version) map[string]v1.Pod {
+func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint, k8sVersion *version.Version) map[string]v1.Pod {
 // Get the required hostpath mounts
 mounts := getHostPathVolumesForTheControlPlane(cfg)
@@ -53,31 +54,31 @@ func GetStaticPodSpecs(cfg *kubeadmapi.InitConfiguration, k8sVersion *version.Ve
 staticPodSpecs := map[string]v1.Pod{
 kubeadmconstants.KubeAPIServer: staticpodutil.ComponentPod(v1.Container{
 Name: kubeadmconstants.KubeAPIServer,
-Image: images.GetKubernetesImage(kubeadmconstants.KubeAPIServer, &cfg.ClusterConfiguration),
+Image: images.GetKubernetesImage(kubeadmconstants.KubeAPIServer, cfg),
 ImagePullPolicy: v1.PullIfNotPresent,
-Command: getAPIServerCommand(cfg),
+Command: getAPIServerCommand(cfg, endpoint),
 VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
-LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeAPIServer, int(cfg.LocalAPIEndpoint.BindPort), "/healthz", v1.URISchemeHTTPS),
+LivenessProbe: livenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), int(endpoint.BindPort), v1.URISchemeHTTPS),
 Resources: staticpodutil.ComponentResources("250m"),
 Env: getProxyEnvVars(),
 }, mounts.GetVolumes(kubeadmconstants.KubeAPIServer)),
 kubeadmconstants.KubeControllerManager: staticpodutil.ComponentPod(v1.Container{
 Name: kubeadmconstants.KubeControllerManager,
-Image: images.GetKubernetesImage(kubeadmconstants.KubeControllerManager, &cfg.ClusterConfiguration),
+Image: images.GetKubernetesImage(kubeadmconstants.KubeControllerManager, cfg),
 ImagePullPolicy: v1.PullIfNotPresent,
 Command: getControllerManagerCommand(cfg, k8sVersion),
 VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeControllerManager)),
-LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeControllerManager, 10252, "/healthz", v1.URISchemeHTTP),
+LivenessProbe: livenessProbe(staticpodutil.GetControllerManagerProbeAddress(cfg), 10252, v1.URISchemeHTTP),
 Resources: staticpodutil.ComponentResources("200m"),
 Env: getProxyEnvVars(),
 }, mounts.GetVolumes(kubeadmconstants.KubeControllerManager)),
 kubeadmconstants.KubeScheduler: staticpodutil.ComponentPod(v1.Container{
 Name: kubeadmconstants.KubeScheduler,
-Image: images.GetKubernetesImage(kubeadmconstants.KubeScheduler, &cfg.ClusterConfiguration),
+Image: images.GetKubernetesImage(kubeadmconstants.KubeScheduler, cfg),
 ImagePullPolicy: v1.PullIfNotPresent,
 Command: getSchedulerCommand(cfg),
 VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeScheduler)),
-LivenessProbe: staticpodutil.ComponentProbe(cfg, kubeadmconstants.KubeScheduler, 10251, "/healthz", v1.URISchemeHTTP),
+LivenessProbe: livenessProbe(staticpodutil.GetSchedulerProbeAddress(cfg), 10251, v1.URISchemeHTTP),
 Resources: staticpodutil.ComponentResources("100m"),
 Env: getProxyEnvVars(),
 }, mounts.GetVolumes(kubeadmconstants.KubeScheduler)),
@@ -85,17 +86,33 @@ func GetStaticPodSpecs(cfg *kubeadmapi.InitConfiguration, k8sVersion *version.Ve
 return staticPodSpecs
 }

+func livenessProbe(host string, port int, scheme v1.URIScheme) *v1.Probe {
+return &v1.Probe{
+Handler: v1.Handler{
+HTTPGet: &v1.HTTPGetAction{
+Host: host,
+Path: "/healthz",
+Port: intstr.FromInt(port),
+Scheme: scheme,
+},
+},
+InitialDelaySeconds: 15,
+TimeoutSeconds: 15,
+FailureThreshold: 8,
+}
+}
+
 // CreateStaticPodFiles creates all the requested static pod files.
-func CreateStaticPodFiles(manifestDir string, cfg *kubeadmapi.InitConfiguration, componentNames ...string) error {
+func CreateStaticPodFiles(manifestDir string, cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint, componentNames ...string) error {
 // TODO: Move the "pkg/util/version".Version object into the internal API instead of always parsing the string
 k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
 if err != nil {
 return err
 }

-// gets the StaticPodSpecs, actualized for the current InitConfiguration
+// gets the StaticPodSpecs, actualized for the current ClusterConfiguration
 klog.V(1).Infoln("[control-plane] getting StaticPodSpecs")
-specs := GetStaticPodSpecs(cfg, k8sVersion)
+specs := GetStaticPodSpecs(cfg, endpoint, k8sVersion)

 // creates required static pod specs
 for _, componentName := range componentNames {
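The new livenessProbe helper above replaces the per-component staticpodutil.ComponentProbe calls and fixes the health-check path, delays and failure threshold in one place; only the host, port and scheme differ per component. A sketch of how such a probe is built and reused, assuming the k8s.io/api/core/v1 and intstr packages of that era (v1.Handler was renamed ProbeHandler in much later API versions):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// livenessProbe mirrors the helper added in the hunk above.
func livenessProbe(host string, port int, scheme v1.URIScheme) *v1.Probe {
	return &v1.Probe{
		Handler: v1.Handler{
			HTTPGet: &v1.HTTPGetAction{
				Host:   host,
				Path:   "/healthz",
				Port:   intstr.FromInt(port),
				Scheme: scheme,
			},
		},
		InitialDelaySeconds: 15,
		TimeoutSeconds:      15,
		FailureThreshold:    8,
	}
}

func main() {
	// The three control-plane components differ only in address, port and scheme
	// (10252 and 10251 are the ports used in the specs above; 6443 is illustrative).
	apiServer := livenessProbe("1.2.3.4", 6443, v1.URISchemeHTTPS)
	controllerManager := livenessProbe("127.0.0.1", 10252, v1.URISchemeHTTP)
	scheduler := livenessProbe("127.0.0.1", 10251, v1.URISchemeHTTP)
	fmt.Println(apiServer.Handler.HTTPGet.Port, controllerManager.Handler.HTTPGet.Port, scheduler.Handler.HTTPGet.Port)
}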
@@ -117,9 +134,9 @@ func CreateStaticPodFiles(manifestDir string, cfg *kubeadmapi.InitConfiguration,
 }

 // getAPIServerCommand builds the right API server command from the given config object and version
-func getAPIServerCommand(cfg *kubeadmapi.InitConfiguration) []string {
+func getAPIServerCommand(cfg *kubeadmapi.ClusterConfiguration, localAPIEndpoint *kubeadmapi.APIEndpoint) []string {
 defaultArguments := map[string]string{
-"advertise-address": cfg.LocalAPIEndpoint.AdvertiseAddress,
+"advertise-address": localAPIEndpoint.AdvertiseAddress,
 "enable-admission-plugins": "NodeRestriction",
 "service-cluster-ip-range": cfg.Networking.ServiceSubnet,
 "service-account-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.ServiceAccountPublicKeyName),
@@ -129,7 +146,7 @@ func getAPIServerCommand(cfg *kubeadmapi.InitConfiguration) []string {
 "kubelet-client-certificate": filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerKubeletClientCertName),
 "kubelet-client-key": filepath.Join(cfg.CertificatesDir, kubeadmconstants.APIServerKubeletClientKeyName),
 "enable-bootstrap-token-auth": "true",
-"secure-port": fmt.Sprintf("%d", cfg.LocalAPIEndpoint.BindPort),
+"secure-port": fmt.Sprintf("%d", localAPIEndpoint.BindPort),
 "allow-privileged": "true",
 "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
 // add options to configure the front proxy. Without the generated client cert, this will never be useable
@@ -243,7 +260,7 @@ func calcNodeCidrSize(podSubnet string) string {
 }

 // getControllerManagerCommand builds the right controller manager command from the given config object and version
-func getControllerManagerCommand(cfg *kubeadmapi.InitConfiguration, k8sVersion *version.Version) []string {
+func getControllerManagerCommand(cfg *kubeadmapi.ClusterConfiguration, k8sVersion *version.Version) []string {

 kubeconfigFile := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName)
 caFile := filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName)
@@ -287,7 +304,7 @@ func getControllerManagerCommand(cfg *kubeadmapi.InitConfiguration, k8sVersion *
 }

 // getSchedulerCommand builds the right scheduler command from the given config object and version
-func getSchedulerCommand(cfg *kubeadmapi.InitConfiguration) []string {
+func getSchedulerCommand(cfg *kubeadmapi.ClusterConfiguration) []string {
 defaultArguments := map[string]string{
 "bind-address": "127.0.0.1",
 "leader-elect": "true",
@@ -44,11 +44,9 @@ var cpVersion = kubeadmconstants.MinimumControlPlaneVersion.WithPreRelease("beta

 func TestGetStaticPodSpecs(t *testing.T) {

-// Creates a Master Configuration
-cfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-KubernetesVersion: "v1.9.0",
-},
+// Creates a Cluster Configuration
+cfg := &kubeadmapi.ClusterConfiguration{
+KubernetesVersion: "v1.9.0",
 }

 // Executes GetStaticPodSpecs
@@ -56,7 +54,7 @@ func TestGetStaticPodSpecs(t *testing.T) {
 // TODO: Move the "pkg/util/version".Version object into the internal API instead of always parsing the string
 k8sVersion, _ := version.ParseSemantic(cfg.KubernetesVersion)

-specs := GetStaticPodSpecs(cfg, k8sVersion)
+specs := GetStaticPodSpecs(cfg, &kubeadmapi.APIEndpoint{}, k8sVersion)

 var assertions = []struct {
 staticPodName string
@@ -113,16 +111,14 @@ func TestCreateStaticPodFilesAndWrappers(t *testing.T) {
 tmpdir := testutil.SetupTempDir(t)
 defer os.RemoveAll(tmpdir)

-// Creates a Master Configuration
-cfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-KubernetesVersion: "v1.9.0",
-},
+// Creates a Cluster Configuration
+cfg := &kubeadmapi.ClusterConfiguration{
+KubernetesVersion: "v1.9.0",
 }

 // Execute createStaticPodFunction
 manifestPath := filepath.Join(tmpdir, kubeadmconstants.ManifestsSubDirName)
-err := CreateStaticPodFiles(manifestPath, cfg, test.components...)
+err := CreateStaticPodFiles(manifestPath, cfg, &kubeadmapi.APIEndpoint{}, test.components...)
 if err != nil {
 t.Errorf("Error executing createStaticPodFunction: %v", err)
 continue
@@ -140,18 +136,17 @@ func TestCreateStaticPodFilesAndWrappers(t *testing.T) {
 func TestGetAPIServerCommand(t *testing.T) {
 var tests = []struct {
 name string
-cfg *kubeadmapi.InitConfiguration
+cfg *kubeadmapi.ClusterConfiguration
+endpoint *kubeadmapi.APIEndpoint
 expected []string
 }{
 {
 name: "testing defaults",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-CertificatesDir: testCertsDir,
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+CertificatesDir: testCertsDir,
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -183,13 +178,11 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "ipv6 advertise address",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-CertificatesDir: testCertsDir,
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+CertificatesDir: testCertsDir,
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -221,21 +214,19 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "an external etcd with custom ca, certs and keys",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-Etcd: kubeadmapi.Etcd{
-External: &kubeadmapi.ExternalEtcd{
-Endpoints: []string{"https://8.6.4.1:2379", "https://8.6.4.2:2379"},
-CAFile: "fuz",
-CertFile: "fiz",
-KeyFile: "faz",
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+Etcd: kubeadmapi.Etcd{
+External: &kubeadmapi.ExternalEtcd{
+Endpoints: []string{"https://8.6.4.1:2379", "https://8.6.4.2:2379"},
+CAFile: "fuz",
+CertFile: "fiz",
+KeyFile: "faz",
+},
-CertificatesDir: testCertsDir,
-},
+CertificatesDir: testCertsDir,
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -267,18 +258,16 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "an insecure etcd",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-Etcd: kubeadmapi.Etcd{
-External: &kubeadmapi.ExternalEtcd{
-Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:2380"},
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+Etcd: kubeadmapi.Etcd{
+External: &kubeadmapi.ExternalEtcd{
+Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:2380"},
+},
-CertificatesDir: testCertsDir,
-},
+CertificatesDir: testCertsDir,
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "2001:db8::1"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -307,23 +296,21 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "test APIServer.ExtraArgs works as expected",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-CertificatesDir: testCertsDir,
-APIServer: kubeadmapi.APIServer{
-ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
-ExtraArgs: map[string]string{
-"service-cluster-ip-range": "baz",
-"advertise-address": "9.9.9.9",
-"audit-policy-file": "/etc/config/audit.yaml",
-"audit-log-path": "/var/log/kubernetes",
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+CertificatesDir: testCertsDir,
+APIServer: kubeadmapi.APIServer{
+ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
+ExtraArgs: map[string]string{
+"service-cluster-ip-range": "baz",
+"advertise-address": "9.9.9.9",
+"audit-policy-file": "/etc/config/audit.yaml",
+"audit-log-path": "/var/log/kubernetes",
+},
 },
 },
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -357,20 +344,18 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "authorization-mode extra-args ABAC",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-CertificatesDir: testCertsDir,
-APIServer: kubeadmapi.APIServer{
-ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
-ExtraArgs: map[string]string{
-"authorization-mode": authzmodes.ModeABAC,
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+CertificatesDir: testCertsDir,
+APIServer: kubeadmapi.APIServer{
+ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
+ExtraArgs: map[string]string{
+"authorization-mode": authzmodes.ModeABAC,
+},
 },
 },
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -402,20 +387,18 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "secure-port extra-args",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-CertificatesDir: testCertsDir,
-APIServer: kubeadmapi.APIServer{
-ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
-ExtraArgs: map[string]string{
-"secure-port": "123",
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+CertificatesDir: testCertsDir,
+APIServer: kubeadmapi.APIServer{
+ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
+ExtraArgs: map[string]string{
+"secure-port": "123",
+},
 },
 },
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -447,20 +430,18 @@ func TestGetAPIServerCommand(t *testing.T) {
 },
 {
 name: "authorization-mode extra-args Webhook",
-cfg: &kubeadmapi.InitConfiguration{
-LocalAPIEndpoint: kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
-CertificatesDir: testCertsDir,
-APIServer: kubeadmapi.APIServer{
-ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
-ExtraArgs: map[string]string{
-"authorization-mode": authzmodes.ModeWebhook,
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+Networking: kubeadmapi.Networking{ServiceSubnet: "bar"},
+CertificatesDir: testCertsDir,
+APIServer: kubeadmapi.APIServer{
+ControlPlaneComponent: kubeadmapi.ControlPlaneComponent{
+ExtraArgs: map[string]string{
+"authorization-mode": authzmodes.ModeWebhook,
+},
 },
 },
 },
+endpoint: &kubeadmapi.APIEndpoint{BindPort: 123, AdvertiseAddress: "1.2.3.4"},
 expected: []string{
 "kube-apiserver",
 "--enable-admission-plugins=NodeRestriction",
@@ -494,7 +475,7 @@ func TestGetAPIServerCommand(t *testing.T) {

 for _, rt := range tests {
 t.Run(rt.name, func(t *testing.T) {
-actual := getAPIServerCommand(rt.cfg)
+actual := getAPIServerCommand(rt.cfg, rt.endpoint)
 sort.Strings(actual)
 sort.Strings(rt.expected)
 if !reflect.DeepEqual(actual, rt.expected) {
@@ -638,11 +619,7 @@ func TestGetControllerManagerCommand(t *testing.T) {
 }

 for _, rt := range tests {
-// TODO: Make getControllerManagerCommand accept a ClusterConfiguration object instead of InitConfiguration
-initcfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: *rt.cfg,
-}
-actual := getControllerManagerCommand(initcfg, version.MustParseSemantic(rt.cfg.KubernetesVersion))
+actual := getControllerManagerCommand(rt.cfg, version.MustParseSemantic(rt.cfg.KubernetesVersion))
 sort.Strings(actual)
 sort.Strings(rt.expected)
 if !reflect.DeepEqual(actual, rt.expected) {
@@ -815,7 +792,7 @@ func TestGetControllerManagerCommandExternalCA(t *testing.T) {
 }
 }

-actual := getControllerManagerCommand(test.cfg, version.MustParseSemantic(test.cfg.KubernetesVersion))
+actual := getControllerManagerCommand(&test.cfg.ClusterConfiguration, version.MustParseSemantic(test.cfg.KubernetesVersion))
 expected := test.expectedArgFunc(tmpdir)
 sort.Strings(actual)
 sort.Strings(expected)
@@ -844,11 +821,7 @@ func TestGetSchedulerCommand(t *testing.T) {
 }

 for _, rt := range tests {
-// TODO: Make getSchedulerCommand accept a ClusterConfiguration object instead of InitConfiguration
-initcfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: *rt.cfg,
-}
-actual := getSchedulerCommand(initcfg)
+actual := getSchedulerCommand(rt.cfg)
 sort.Strings(actual)
 sort.Strings(rt.expected)
 if !reflect.DeepEqual(actual, rt.expected) {
@@ -42,7 +42,7 @@ const (
 var caCertsExtraVolumePaths = []string{"/etc/pki", "/usr/share/ca-certificates", "/usr/local/share/ca-certificates", "/etc/ca-certificates"}

 // getHostPathVolumesForTheControlPlane gets the required hostPath volumes and mounts for the control plane
-func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.InitConfiguration) controlPlaneHostPathMounts {
+func getHostPathVolumesForTheControlPlane(cfg *kubeadmapi.ClusterConfiguration) controlPlaneHostPathMounts {
 hostPathDirectoryOrCreate := v1.HostPathDirectoryOrCreate
 hostPathFileOrCreate := v1.HostPathFileOrCreate
 mounts := newControlPlaneHostPathMounts()
@@ -514,11 +514,7 @@ func TestGetHostPathVolumesForTheControlPlane(t *testing.T) {
 defer func() { caCertsExtraVolumePaths = []string{"/etc/pki", "/usr/share/ca-certificates"} }()

 for _, rt := range tests {
-// TODO: Make getHostPathVolumesForTheControlPlane accept a ClusterConfiguration object instead of InitConfiguration
-initcfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: *rt.cfg,
-}
-mounts := getHostPathVolumesForTheControlPlane(initcfg)
+mounts := getHostPathVolumesForTheControlPlane(rt.cfg)

 // Avoid unit test errors when the flexvolume is mounted
 if _, ok := mounts.volumes[kubeadmconstants.KubeControllerManager][flexvolumeDirVolumeName]; ok {
@@ -46,8 +46,8 @@ const (
 // CreateLocalEtcdStaticPodManifestFile will write local etcd static pod manifest file.
 // This function is used by init - when the etcd cluster is empty - or by kubeadm
 // upgrade - when the etcd cluster is already up and running (and the --initial-cluster flag have no impact)
-func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.InitConfiguration) error {
-if cfg.ClusterConfiguration.Etcd.External != nil {
+func CreateLocalEtcdStaticPodManifestFile(manifestDir string, nodeName string, cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint) error {
+if cfg.Etcd.External != nil {
 return errors.New("etcd static pod manifest cannot be generated for cluster using external etcd")
 }
 // gets etcd StaticPodSpec
@@ -58,7 +58,7 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.In
 return errors.Wrapf(err, "failed to create etcd directory %q", cfg.Etcd.Local.DataDir)
 }

-spec := GetEtcdPodSpec(cfg, emptyInitialCluster)
+spec := GetEtcdPodSpec(cfg, endpoint, nodeName, emptyInitialCluster)
 // writes etcd StaticPod to disk
 if err := staticpodutil.WriteStaticPodToDisk(kubeadmconstants.Etcd, manifestDir, spec); err != nil {
 return err
@@ -69,7 +69,7 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.In
 }

 // CheckLocalEtcdClusterStatus verifies health state of local/stacked etcd cluster before installing a new etcd member
-func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.InitConfiguration) error {
+func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration) error {
 fmt.Println("[etcd] Checking etcd cluster health")

 // creates an etcd client that connects to all the local/stacked etcd members
@@ -91,7 +91,7 @@ func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.Ini
 // CreateStackedEtcdStaticPodManifestFile will write local etcd static pod manifest file
 // for an additional etcd member that is joining an existing local/stacked etcd cluster.
 // Other members of the etcd cluster will be notified of the joining node in beforehand as well.
-func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifestDir string, cfg *kubeadmapi.InitConfiguration) error {
+func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifestDir string, nodeName string, cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint) error {
 // creates an etcd client that connects to all the local/stacked etcd members
 klog.V(1).Info("creating etcd client that connects to etcd pods")
 etcdClient, err := etcdutil.NewFromCluster(client, cfg.CertificatesDir)
@@ -100,10 +100,10 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest
 }

 // notifies the other members of the etcd cluster about the joining member
-etcdPeerAddress := etcdutil.GetPeerURL(cfg)
+etcdPeerAddress := etcdutil.GetPeerURL(endpoint)

 klog.V(1).Infof("Adding etcd member: %s", etcdPeerAddress)
-initialCluster, err := etcdClient.AddMember(cfg.NodeRegistration.Name, etcdPeerAddress)
+initialCluster, err := etcdClient.AddMember(nodeName, etcdPeerAddress)
 if err != nil {
 return err
 }
@@ -117,7 +117,7 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest

 klog.V(1).Info("Creating local etcd static pod manifest file")
 // gets etcd StaticPodSpec, actualized for the current InitConfiguration and the new list of etcd members
-spec := GetEtcdPodSpec(cfg, initialCluster)
+spec := GetEtcdPodSpec(cfg, endpoint, nodeName, initialCluster)
 // writes etcd StaticPod to disk
 if err := staticpodutil.WriteStaticPodToDisk(kubeadmconstants.Etcd, manifestDir, spec); err != nil {
 return err
@@ -133,9 +133,9 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest
 return nil
 }

-// GetEtcdPodSpec returns the etcd static Pod actualized to the context of the current InitConfiguration
+// GetEtcdPodSpec returns the etcd static Pod actualized to the context of the current configuration
 // NB. GetEtcdPodSpec methods holds the information about how kubeadm creates etcd static pod manifests.
-func GetEtcdPodSpec(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil.Member) v1.Pod {
+func GetEtcdPodSpec(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint, nodeName string, initialCluster []etcdutil.Member) v1.Pod {
 pathType := v1.HostPathDirectoryOrCreate
 etcdMounts := map[string]v1.Volume{
 etcdVolumeName: staticpodutil.NewVolume(etcdVolumeName, cfg.Etcd.Local.DataDir, &pathType),
@@ -143,8 +143,8 @@ func GetEtcdPodSpec(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil
 }
 return staticpodutil.ComponentPod(v1.Container{
 Name: kubeadmconstants.Etcd,
-Command: getEtcdCommand(cfg, initialCluster),
-Image: images.GetEtcdImage(&cfg.ClusterConfiguration),
+Command: getEtcdCommand(cfg, endpoint, nodeName, initialCluster),
+Image: images.GetEtcdImage(cfg),
 ImagePullPolicy: v1.PullIfNotPresent,
 // Mount the etcd datadir path read-write so etcd can store data in a more persistent manner
 VolumeMounts: []v1.VolumeMount{
@@ -152,20 +152,20 @@ func GetEtcdPodSpec(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil
 staticpodutil.NewVolumeMount(certsVolumeName, cfg.CertificatesDir+"/etcd", false),
 },
 LivenessProbe: staticpodutil.EtcdProbe(
-cfg, kubeadmconstants.Etcd, kubeadmconstants.EtcdListenClientPort, cfg.CertificatesDir,
+&cfg.Etcd, kubeadmconstants.EtcdListenClientPort, cfg.CertificatesDir,
 kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdHealthcheckClientCertName, kubeadmconstants.EtcdHealthcheckClientKeyName,
 ),
 }, etcdMounts)
 }

 // getEtcdCommand builds the right etcd command from the given config object
-func getEtcdCommand(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil.Member) []string {
+func getEtcdCommand(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmapi.APIEndpoint, nodeName string, initialCluster []etcdutil.Member) []string {
 defaultArguments := map[string]string{
-"name": cfg.GetNodeName(),
-"listen-client-urls": fmt.Sprintf("%s,%s", etcdutil.GetClientURLByIP("127.0.0.1"), etcdutil.GetClientURL(cfg)),
-"advertise-client-urls": etcdutil.GetClientURL(cfg),
-"listen-peer-urls": etcdutil.GetPeerURL(cfg),
-"initial-advertise-peer-urls": etcdutil.GetPeerURL(cfg),
+"name": nodeName,
+"listen-client-urls": fmt.Sprintf("%s,%s", etcdutil.GetClientURLByIP("127.0.0.1"), etcdutil.GetClientURL(endpoint)),
+"advertise-client-urls": etcdutil.GetClientURL(endpoint),
+"listen-peer-urls": etcdutil.GetPeerURL(endpoint),
+"initial-advertise-peer-urls": etcdutil.GetPeerURL(endpoint),
 "data-dir": cfg.Etcd.Local.DataDir,
 "cert-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerCertName),
 "key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.EtcdServerKeyName),
@@ -179,7 +179,7 @@ func getEtcdCommand(cfg *kubeadmapi.InitConfiguration, initialCluster []etcdutil
 }

 if len(initialCluster) == 0 {
-defaultArguments["initial-cluster"] = fmt.Sprintf("%s=%s", cfg.GetNodeName(), etcdutil.GetPeerURL(cfg))
+defaultArguments["initial-cluster"] = fmt.Sprintf("%s=%s", nodeName, etcdutil.GetPeerURL(endpoint))
 } else {
 // NB. the joining etcd member should be part of the initialCluster list
 endpoints := []string{}
@@ -32,19 +32,18 @@ import (

 func TestGetEtcdPodSpec(t *testing.T) {
 // Creates a Master Configuration
-cfg := &kubeadmapi.InitConfiguration{
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-KubernetesVersion: "v1.7.0",
-Etcd: kubeadmapi.Etcd{
-Local: &kubeadmapi.LocalEtcd{
-DataDir: "/var/lib/etcd",
-},
+cfg := &kubeadmapi.ClusterConfiguration{
+KubernetesVersion: "v1.7.0",
+Etcd: kubeadmapi.Etcd{
+Local: &kubeadmapi.LocalEtcd{
+DataDir: "/var/lib/etcd",
+},
 },
 }
+endpoint := &kubeadmapi.APIEndpoint{}

 // Executes GetEtcdPodSpec
-spec := GetEtcdPodSpec(cfg, []etcdutil.Member{})
+spec := GetEtcdPodSpec(cfg, endpoint, "", []etcdutil.Member{})

 // Assert each specs refers to the right pod
 if spec.Spec.Containers[0].Name != kubeadmconstants.Etcd {
@@ -58,35 +57,31 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) {
 defer os.RemoveAll(tmpdir)

 var tests = []struct {
-cfg *kubeadmapi.InitConfiguration
+cfg *kubeadmapi.ClusterConfiguration
 expectedError bool
 }{
 {
-cfg: &kubeadmapi.InitConfiguration{
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-KubernetesVersion: "v1.7.0",
-Etcd: kubeadmapi.Etcd{
-Local: &kubeadmapi.LocalEtcd{
-DataDir: tmpdir + "/etcd",
-},
+cfg: &kubeadmapi.ClusterConfiguration{
+KubernetesVersion: "v1.7.0",
+Etcd: kubeadmapi.Etcd{
+Local: &kubeadmapi.LocalEtcd{
+DataDir: tmpdir + "/etcd",
+},
 },
 },
 expectedError: false,
 },
 {
-cfg: &kubeadmapi.InitConfiguration{
-ClusterConfiguration: kubeadmapi.ClusterConfiguration{
-KubernetesVersion: "v1.7.0",
-Etcd: kubeadmapi.Etcd{
-External: &kubeadmapi.ExternalEtcd{
-Endpoints: []string{
-"https://etcd-instance:2379",
-},
-CAFile: "/etc/kubernetes/pki/etcd/ca.crt",
-CertFile: "/etc/kubernetes/pki/etcd/apiserver-etcd-client.crt",
-KeyFile: "/etc/kubernetes/pki/etcd/apiserver-etcd-client.key",
+cfg: &kubeadmapi.ClusterConfiguration{
+KubernetesVersion: "v1.7.0",
+Etcd: kubeadmapi.Etcd{
+External: &kubeadmapi.ExternalEtcd{
+Endpoints: []string{
+"https://etcd-instance:2379",
+},
+CAFile: "/etc/kubernetes/pki/etcd/ca.crt",
+CertFile: "/etc/kubernetes/pki/etcd/apiserver-etcd-client.crt",
+KeyFile: "/etc/kubernetes/pki/etcd/apiserver-etcd-client.key",
 },
 },
 },
@@ -97,7 +92,7 @@ func TestCreateLocalEtcdStaticPodManifestFile(t *testing.T) {
 for _, test := range tests {
 // Execute createStaticPodFunction
 manifestPath := filepath.Join(tmpdir, kubeadmconstants.ManifestsSubDirName)
-err := CreateLocalEtcdStaticPodManifestFile(manifestPath, test.cfg)
+err := CreateLocalEtcdStaticPodManifestFile(manifestPath, "", test.cfg, &kubeadmapi.APIEndpoint{})

 if !test.expectedError {
 if err != nil {
@ -230,23 +225,18 @@ func TestGetEtcdCommand(t *testing.T) {
|
|||
|
||||
for _, rt := range tests {
|
||||
t.Run(rt.name, func(t *testing.T) {
|
||||
cfg := &kubeadmapi.InitConfiguration{
|
||||
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
|
||||
AdvertiseAddress: rt.advertiseAddress,
|
||||
},
|
||||
NodeRegistration: kubeadmapi.NodeRegistrationOptions{
|
||||
Name: rt.nodeName,
|
||||
},
|
||||
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
|
||||
Etcd: kubeadmapi.Etcd{
|
||||
Local: &kubeadmapi.LocalEtcd{
|
||||
DataDir: "/var/lib/etcd",
|
||||
ExtraArgs: rt.extraArgs,
|
||||
},
|
||||
endpoint := &kubeadmapi.APIEndpoint{
|
||||
AdvertiseAddress: rt.advertiseAddress,
|
||||
}
|
||||
cfg := &kubeadmapi.ClusterConfiguration{
|
||||
Etcd: kubeadmapi.Etcd{
|
||||
Local: &kubeadmapi.LocalEtcd{
|
||||
DataDir: "/var/lib/etcd",
|
||||
ExtraArgs: rt.extraArgs,
|
||||
},
|
||||
},
|
||||
}
|
||||
actual := getEtcdCommand(cfg, rt.initialCluster)
|
||||
actual := getEtcdCommand(cfg, endpoint, rt.nodeName, rt.initialCluster)
|
||||
sort.Strings(actual)
|
||||
sort.Strings(rt.expected)
|
||||
if !reflect.DeepEqual(actual, rt.expected) {
|
||||
|
|
|
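For orientation only (this sketch is not part of the diff): after this change the etcd phase helpers take a ClusterConfiguration plus an explicit APIEndpoint and node name instead of a full InitConfiguration. The names and values below are placeholders.

// Hypothetical caller, for illustration; values are made up.
cfg := &kubeadmapi.ClusterConfiguration{
	Etcd: kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{DataDir: "/var/lib/etcd"}},
}
endpoint := &kubeadmapi.APIEndpoint{AdvertiseAddress: "10.0.0.1"}
nodeName := "node-0"
// The endpoint and node name are now passed explicitly.
spec := GetEtcdPodSpec(cfg, endpoint, nodeName, []etcdutil.Member{})
_ = spec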
@@ -135,7 +135,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
return nil, errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
}

masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
if err != nil {
return nil, err
}

@@ -285,7 +285,7 @@ func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.InitConfigurat
return errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
}

masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
if err != nil {
return err
}

@@ -312,7 +312,7 @@ func WriteKubeConfigWithToken(out io.Writer, cfg *kubeadmapi.InitConfiguration,
return errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
}

masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
if err != nil {
return err
}

@@ -162,7 +162,7 @@ func TestGetKubeConfigSpecs(t *testing.T) {
}

// Asserts InitConfiguration values injected into spec
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg)
masterEndpoint, err := kubeadmutil.GetMasterEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
if err != nil {
t.Error(err)
}
@@ -30,7 +30,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/version"
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"

@@ -51,9 +50,9 @@ func WriteConfigToDisk(kubeletConfig *kubeletconfig.KubeletConfiguration, kubele

// CreateConfigMap creates a ConfigMap with the generic kubelet configuration.
// Used at "kubeadm init" and "kubeadm upgrade" time
func CreateConfigMap(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) error {
func CreateConfigMap(cfg *kubeletconfig.KubeletConfiguration, k8sVersionStr string, client clientset.Interface) error {

k8sVersion, err := version.ParseSemantic(cfg.KubernetesVersion)
k8sVersion, err := version.ParseSemantic(k8sVersionStr)
if err != nil {
return err
}

@@ -61,7 +60,7 @@ func CreateConfigMap(cfg *kubeadmapi.InitConfiguration, client clientset.Interfa
configMapName := kubeadmconstants.GetKubeletConfigMapName(k8sVersion)
fmt.Printf("[kubelet] Creating a ConfigMap %q in namespace %s with the configuration for the kubelets in the cluster\n", configMapName, metav1.NamespaceSystem)

kubeletBytes, err := getConfigBytes(cfg.ComponentConfigs.Kubelet)
kubeletBytes, err := getConfigBytes(cfg)
if err != nil {
return err
}

@@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

@@ -33,15 +32,8 @@ import (
func TestCreateConfigMap(t *testing.T) {
nodeName := "fake-node"
client := fake.NewSimpleClientset()
cfg := &kubeadmapi.InitConfiguration{
NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodeName},
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
KubernetesVersion: constants.CurrentKubernetesVersion.String(),
ComponentConfigs: kubeadmapi.ComponentConfigs{
Kubelet: &kubeletconfig.KubeletConfiguration{},
},
},
}
k8sVersionStr := constants.CurrentKubernetesVersion.String()
cfg := &kubeletconfig.KubeletConfiguration{}

client.PrependReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.Node{

@@ -61,7 +53,7 @@ func TestCreateConfigMap(t *testing.T) {
return true, nil, nil
})

if err := CreateConfigMap(cfg, client); err != nil {
if err := CreateConfigMap(cfg, k8sVersionStr, client); err != nil {
t.Errorf("CreateConfigMap: unexpected error %v", err)
}
}
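For orientation only (not part of the diff), a minimal sketch of calling the narrowed CreateConfigMap, which now takes just the kubelet component config and a Kubernetes version string; the version value here is a placeholder.

kubeletCfg := &kubeletconfig.KubeletConfiguration{} // component config to upload
client := fake.NewSimpleClientset()                 // any clientset.Interface works
if err := CreateConfigMap(kubeletCfg, "v1.14.0", client); err != nil {
	// handle the error; the version string must parse as a semantic version
}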
@@ -46,29 +46,29 @@ type kubeletFlagsOpts struct {

// WriteKubeletDynamicEnvFile writes an environment file with dynamic flags to the kubelet.
// Used at "kubeadm init" and "kubeadm join" time.
func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.InitConfiguration, registerTaintsUsingFlags bool, kubeletDir string) error {
func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.ClusterConfiguration, nodeReg *kubeadmapi.NodeRegistrationOptions, registerTaintsUsingFlags bool, kubeletDir string) error {
hostName, err := nodeutil.GetHostname("")
if err != nil {
return err
}

flagOpts := kubeletFlagsOpts{
nodeRegOpts: &cfg.NodeRegistration,
nodeRegOpts: nodeReg,
featureGates: cfg.FeatureGates,
pauseImage: images.GetPauseImage(&cfg.ClusterConfiguration),
pauseImage: images.GetPauseImage(cfg),
registerTaintsUsingFlags: registerTaintsUsingFlags,
execer: utilsexec.New(),
pidOfFunc: procfs.PidOf,
defaultHostname: hostName,
}
stringMap := buildKubeletArgMap(flagOpts)
argList := kubeadmutil.BuildArgumentListFromMap(stringMap, cfg.NodeRegistration.KubeletExtraArgs)
argList := kubeadmutil.BuildArgumentListFromMap(stringMap, nodeReg.KubeletExtraArgs)
envFileContent := fmt.Sprintf("%s=%s\n", constants.KubeletEnvFileVariableName, strings.Join(argList, " "))

return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir)
}

// buildKubeletArgMap takes a InitConfiguration object and builds based on that a string-string map with flags
// buildKubeletArgMap takes a kubeletFlagsOpts object and builds based on that a string-string map with flags
// that should be given to the local kubelet daemon.
func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string {
kubeletFlags := map[string]string{}
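For orientation only (not part of the diff), a sketch of the new WriteKubeletDynamicEnvFile call shape: cluster-wide settings and the per-node NodeRegistrationOptions are now passed separately. Field values are placeholders.

clusterCfg := &kubeadmapi.ClusterConfiguration{}               // feature gates, image repository, ...
nodeReg := &kubeadmapi.NodeRegistrationOptions{Name: "node-0"} // node name, kubelet extra args, ...
if err := WriteKubeletDynamicEnvFile(clusterCfg, nodeReg, false, "/var/lib/kubelet"); err != nil {
	// handle the error
}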
@@ -61,7 +61,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
}

// Create the new, version-branched kubelet ComponentConfig ConfigMap
if err := kubeletphase.CreateConfigMap(cfg, client); err != nil {
if err := kubeletphase.CreateConfigMap(cfg.ClusterConfiguration.ComponentConfigs.Kubelet, cfg.KubernetesVersion, client); err != nil {
errs = append(errs, errors.Wrap(err, "error creating kubelet configuration ConfigMap"))
}

@@ -108,21 +108,21 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
}

// Upgrade kube-dns/CoreDNS and kube-proxy
if err := dns.EnsureDNSAddon(cfg, client); err != nil {
if err := dns.EnsureDNSAddon(&cfg.ClusterConfiguration, client); err != nil {
errs = append(errs, err)
}
// Remove the old DNS deployment if a new DNS service is now used (kube-dns to CoreDNS or vice versa)
if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg, client, dryRun); err != nil {
if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(&cfg.ClusterConfiguration, client, dryRun); err != nil {
errs = append(errs, err)
}

if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
if err := proxy.EnsureProxyAddon(&cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint, client); err != nil {
errs = append(errs, err)
}
return errorsutil.NewAggregate(errs)
}

func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.InitConfiguration, client clientset.Interface, dryRun bool) error {
func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, dryRun bool) error {
return apiclient.TryRunCommand(func() error {
installedDeploymentName := kubeadmconstants.KubeDNSDeploymentName
deploymentToDelete := kubeadmconstants.CoreDNSDeploymentName

@@ -210,7 +210,7 @@ func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitCon
// Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
// as we handle that ourselves in the markmaster phase
// TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
if err := kubeletphase.WriteKubeletDynamicEnvFile(cfg, false, kubeletDir); err != nil {
if err := kubeletphase.WriteKubeletDynamicEnvFile(&cfg.ClusterConfiguration, &cfg.NodeRegistration, false, kubeletDir); err != nil {
errs = append(errs, errors.Wrap(err, "error writing a dynamic environment file for the kubelet"))
}
@@ -267,7 +267,7 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa
if err != nil {
return true, errors.Wrap(err, "failed to retrieve the current etcd version")
}
currentEtcdVersionStr, ok := currentEtcdVersions[etcdutil.GetClientURL(cfg)]
currentEtcdVersionStr, ok := currentEtcdVersions[etcdutil.GetClientURL(&cfg.LocalAPIEndpoint)]
if !ok {
return true, errors.Wrap(err, "failed to retrieve the current etcd version")
}

@@ -293,7 +293,7 @@ func performEtcdStaticPodUpgrade(client clientset.Interface, waiter apiclient.Wa

// Write the updated etcd static Pod manifest into the temporary directory, at this point no etcd change
// has occurred in any aspects.
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.TempManifestDir(), cfg); err != nil {
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.TempManifestDir(), cfg.NodeRegistration.Name, &cfg.ClusterConfiguration, &cfg.LocalAPIEndpoint); err != nil {
return true, errors.Wrap(err, "error creating local etcd static pod manifest file")
}

@@ -453,7 +453,7 @@ func TestStaticPodControlPlane(t *testing.T) {
if err != nil {
t.Fatalf("couldn't run CreateInitStaticPodManifestFiles: %v", err)
}
err = etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.RealManifestDir(), oldcfg)
err = etcdphase.CreateLocalEtcdStaticPodManifestFile(pathMgr.RealManifestDir(), oldcfg.NodeRegistration.Name, &oldcfg.ClusterConfiguration, &oldcfg.LocalAPIEndpoint)
if err != nil {
t.Fatalf("couldn't run CreateLocalEtcdStaticPodManifestFile: %v", err)
}
@@ -973,11 +973,11 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.JoinConfigura
}

// RunOptionalJoinNodeChecks executes all individual, applicable to node configuration dependant checks
func RunOptionalJoinNodeChecks(execer utilsexec.Interface, initCfg *kubeadmapi.InitConfiguration, ignorePreflightErrors sets.String) error {
func RunOptionalJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.ClusterConfiguration, ignorePreflightErrors sets.String) error {
checks := []Checker{}

// Check ipvs required kernel module if we use ipvs kube-proxy mode
if initCfg.ComponentConfigs.KubeProxy != nil && initCfg.ComponentConfigs.KubeProxy.Mode == ipvsutil.IPVSProxyMode {
if cfg.ComponentConfigs.KubeProxy != nil && cfg.ComponentConfigs.KubeProxy.Mode == ipvsutil.IPVSProxyMode {
checks = append(checks,
ipvsutil.RequiredIPVSKernelModulesAvailableCheck{Executor: execer},
)
@@ -29,35 +29,35 @@ import (
)

// GetMasterEndpoint returns a properly formatted endpoint for the control plane built according following rules:
// - If the ControlPlaneEndpoint is defined, use it.
// - if the ControlPlaneEndpoint is defined but without a port number, use the ControlPlaneEndpoint + api.BindPort is used.
// - Otherwise, in case the ControlPlaneEndpoint is not defined, use the api.AdvertiseAddress + the api.BindPort.
func GetMasterEndpoint(cfg *kubeadmapi.InitConfiguration) (string, error) {
// - If the controlPlaneEndpoint is defined, use it.
// - if the controlPlaneEndpoint is defined but without a port number, use the controlPlaneEndpoint + localEndpoint.BindPort is used.
// - Otherwise, in case the controlPlaneEndpoint is not defined, use the localEndpoint.AdvertiseAddress + the localEndpoint.BindPort.
func GetMasterEndpoint(controlPlaneEndpoint string, localEndpoint *kubeadmapi.APIEndpoint) (string, error) {
// parse the bind port
bindPortString := strconv.Itoa(int(cfg.LocalAPIEndpoint.BindPort))
bindPortString := strconv.Itoa(int(localEndpoint.BindPort))
if _, err := ParsePort(bindPortString); err != nil {
return "", errors.Wrapf(err, "invalid value %q given for api.bindPort", cfg.LocalAPIEndpoint.BindPort)
return "", errors.Wrapf(err, "invalid value %q given for api.bindPort", localEndpoint.BindPort)
}

// parse the AdvertiseAddress
var ip = net.ParseIP(cfg.LocalAPIEndpoint.AdvertiseAddress)
var ip = net.ParseIP(localEndpoint.AdvertiseAddress)
if ip == nil {
return "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", cfg.LocalAPIEndpoint.AdvertiseAddress)
return "", errors.Errorf("invalid value `%s` given for api.advertiseAddress", localEndpoint.AdvertiseAddress)
}

// set the master url using cfg.API.AdvertiseAddress + the cfg.API.BindPort
// set the master url using localEndpoint.AdvertiseAddress + the localEndpoint.BindPort
masterURL := &url.URL{
Scheme: "https",
Host: net.JoinHostPort(ip.String(), bindPortString),
}

// if the controlplane endpoint is defined
if len(cfg.ControlPlaneEndpoint) > 0 {
if len(controlPlaneEndpoint) > 0 {
// parse the controlplane endpoint
var host, port string
var err error
if host, port, err = ParseHostPort(cfg.ControlPlaneEndpoint); err != nil {
return "", errors.Wrapf(err, "invalid value %q given for controlPlaneEndpoint", cfg.ControlPlaneEndpoint)
if host, port, err = ParseHostPort(controlPlaneEndpoint); err != nil {
return "", errors.Wrapf(err, "invalid value %q given for controlPlaneEndpoint", controlPlaneEndpoint)
}

// if a port is provided within the controlPlaneAddress warn the users we are using it, else use the bindport

@@ -198,7 +198,7 @@ func TestGetMasterEndpoint(t *testing.T) {

for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
actualEndpoint, actualError := GetMasterEndpoint(rt.cfg)
actualEndpoint, actualError := GetMasterEndpoint(rt.cfg.ControlPlaneEndpoint, &rt.cfg.LocalAPIEndpoint)

if (actualError != nil) && !rt.expectedError {
t.Errorf("%s unexpected failure: %v", rt.name, actualError)
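For orientation only (not part of the diff), a sketch of the reworked GetMasterEndpoint, which now takes the controlPlaneEndpoint string and the local APIEndpoint directly; the addresses and expected results are illustrative.

local := &kubeadmapi.APIEndpoint{AdvertiseAddress: "192.168.0.10", BindPort: 6443}

// No controlPlaneEndpoint: the advertise address and bind port are used.
ep, err := kubeadmutil.GetMasterEndpoint("", local) // "https://192.168.0.10:6443", assuming err == nil

// controlPlaneEndpoint without a port: its host is combined with the local bind port.
ep, err = kubeadmutil.GetMasterEndpoint("cp.example.com", local) // "https://cp.example.com:6443", assuming err == nil
_, _ = ep, err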
@@ -290,14 +290,14 @@ func CheckConfigurationIsHA(cfg *kubeadmapi.Etcd) bool {

// GetClientURL creates an HTTPS URL that uses the configured advertise
// address and client port for the API controller
func GetClientURL(cfg *kubeadmapi.InitConfiguration) string {
return "https://" + net.JoinHostPort(cfg.LocalAPIEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenClientPort))
func GetClientURL(localEndpoint *kubeadmapi.APIEndpoint) string {
return "https://" + net.JoinHostPort(localEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenClientPort))
}

// GetPeerURL creates an HTTPS URL that uses the configured advertise
// address and peer port for the API controller
func GetPeerURL(cfg *kubeadmapi.InitConfiguration) string {
return "https://" + net.JoinHostPort(cfg.LocalAPIEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenPeerPort))
func GetPeerURL(localEndpoint *kubeadmapi.APIEndpoint) string {
return "https://" + net.JoinHostPort(localEndpoint.AdvertiseAddress, strconv.Itoa(constants.EtcdListenPeerPort))
}

// GetClientURLByIP creates an HTTPS URL based on an IP address

@@ -72,7 +72,7 @@ func TestCheckConfigurationIsHA(t *testing.T) {
}
}

func testGetURL(t *testing.T, getURLFunc func(*kubeadmapi.InitConfiguration) string, port int) {
func testGetURL(t *testing.T, getURLFunc func(*kubeadmapi.APIEndpoint) string, port int) {
portStr := strconv.Itoa(port)
var tests = []struct {
name string

@@ -102,12 +102,7 @@ func testGetURL(t *testing.T, getURLFunc func(*kubeadmapi.InitConfiguration) str
}

for _, test := range tests {
cfg := &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
AdvertiseAddress: test.advertiseAddress,
},
}
url := getURLFunc(cfg)
url := getURLFunc(&kubeadmapi.APIEndpoint{AdvertiseAddress: test.advertiseAddress})
if url != test.expectedURL {
t.Errorf("expected %s, got %s", test.expectedURL, url)
}
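For orientation only (not part of the diff), the etcd URL helpers now take just an APIEndpoint; the ports in the comments assume the default etcd client and peer ports.

endpoint := &kubeadmapi.APIEndpoint{AdvertiseAddress: "10.0.0.5"}
clientURL := etcdutil.GetClientURL(endpoint) // e.g. "https://10.0.0.5:2379"
peerURL := etcdutil.GetPeerURL(endpoint)     // e.g. "https://10.0.0.5:2380"
_, _ = clientURL, peerURL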
@@ -12,11 +12,9 @@ go_test(
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/test:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)

@@ -32,7 +30,6 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
@@ -31,7 +31,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/util"

@@ -82,28 +81,11 @@ func ComponentResources(cpu string) v1.ResourceRequirements {
}
}

// ComponentProbe is a helper function building a ready v1.Probe object from some simple parameters
func ComponentProbe(cfg *kubeadmapi.InitConfiguration, componentName string, port int, path string, scheme v1.URIScheme) *v1.Probe {
return &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Host: GetProbeAddress(cfg, componentName),
Path: path,
Port: intstr.FromInt(port),
Scheme: scheme,
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
FailureThreshold: 8,
}
}

// EtcdProbe is a helper function for building a shell-based, etcdctl v1.Probe object to healthcheck etcd
func EtcdProbe(cfg *kubeadmapi.InitConfiguration, componentName string, port int, certsDir string, CACertName string, CertName string, KeyName string) *v1.Probe {
func EtcdProbe(cfg *kubeadmapi.Etcd, port int, certsDir string, CACertName string, CertName string, KeyName string) *v1.Probe {
tlsFlags := fmt.Sprintf("--cacert=%[1]s/%[2]s --cert=%[1]s/%[3]s --key=%[1]s/%[4]s", certsDir, CACertName, CertName, KeyName)
// etcd pod is alive if a linearizable get succeeds.
cmd := fmt.Sprintf("ETCDCTL_API=3 etcdctl --endpoints=https://[%s]:%d %s get foo", GetProbeAddress(cfg, componentName), port, tlsFlags)
cmd := fmt.Sprintf("ETCDCTL_API=3 etcdctl --endpoints=https://[%s]:%d %s get foo", GetEtcdProbeAddress(cfg), port, tlsFlags)

return &v1.Probe{
Handler: v1.Handler{

@@ -225,71 +207,80 @@ func ReadStaticPodFromDisk(manifestPath string) (*v1.Pod, error) {
return pod, nil
}

// GetProbeAddress returns an IP address or 127.0.0.1 to use for liveness probes
// in static pod manifests.
func GetProbeAddress(cfg *kubeadmapi.InitConfiguration, componentName string) string {
switch {
case componentName == kubeadmconstants.KubeAPIServer:
// In the case of a self-hosted deployment, the initial host on which kubeadm --init is run,
// will generate a DaemonSet with a nodeSelector such that all nodes with the label
// node-role.kubernetes.io/master='' will have the API server deployed to it. Since the init
// is run only once on an initial host, the API advertise address will be invalid for any
// future hosts that do not have the same address. Furthermore, since liveness and readiness
// probes do not support the Downward API we cannot dynamically set the advertise address to
// the node's IP. The only option then is to use localhost.
if cfg.LocalAPIEndpoint.AdvertiseAddress != "" {
return cfg.LocalAPIEndpoint.AdvertiseAddress
}
case componentName == kubeadmconstants.KubeControllerManager:
if addr, exists := cfg.ControllerManager.ExtraArgs[kubeControllerManagerAddressArg]; exists {
return addr
}
case componentName == kubeadmconstants.KubeScheduler:
if addr, exists := cfg.Scheduler.ExtraArgs[kubeSchedulerAddressArg]; exists {
return addr
}
case componentName == kubeadmconstants.Etcd:
if cfg.Etcd.Local != nil && cfg.Etcd.Local.ExtraArgs != nil {
if arg, exists := cfg.Etcd.Local.ExtraArgs[etcdListenClientURLsArg]; exists {
// Use the first url in the listen-client-urls if multiple url's are specified.
if strings.ContainsAny(arg, ",") {
arg = strings.Split(arg, ",")[0]
// GetAPIServerProbeAddress returns the probe address for the API server
func GetAPIServerProbeAddress(endpoint *kubeadmapi.APIEndpoint) string {
// In the case of a self-hosted deployment, the initial host on which kubeadm --init is run,
// will generate a DaemonSet with a nodeSelector such that all nodes with the label
// node-role.kubernetes.io/master='' will have the API server deployed to it. Since the init
// is run only once on an initial host, the API advertise address will be invalid for any
// future hosts that do not have the same address. Furthermore, since liveness and readiness
// probes do not support the Downward API we cannot dynamically set the advertise address to
// the node's IP. The only option then is to use localhost.
if endpoint != nil && endpoint.AdvertiseAddress != "" {
return endpoint.AdvertiseAddress
}

return "127.0.0.1"
}

// GetControllerManagerProbeAddress returns the kubernetes controller manager probe address
func GetControllerManagerProbeAddress(cfg *kubeadmapi.ClusterConfiguration) string {
if addr, exists := cfg.ControllerManager.ExtraArgs[kubeControllerManagerAddressArg]; exists {
return addr
}
return "127.0.0.1"
}

// GetSchedulerProbeAddress returns the kubernetes scheduler probe address
func GetSchedulerProbeAddress(cfg *kubeadmapi.ClusterConfiguration) string {
if addr, exists := cfg.Scheduler.ExtraArgs[kubeSchedulerAddressArg]; exists {
return addr
}
return "127.0.0.1"
}

// GetEtcdProbeAddress returns the etcd probe address
func GetEtcdProbeAddress(cfg *kubeadmapi.Etcd) string {
if cfg.Local != nil && cfg.Local.ExtraArgs != nil {
if arg, exists := cfg.Local.ExtraArgs[etcdListenClientURLsArg]; exists {
// Use the first url in the listen-client-urls if multiple url's are specified.
if strings.ContainsAny(arg, ",") {
arg = strings.Split(arg, ",")[0]
}
parsedURL, err := url.Parse(arg)
if err != nil || parsedURL.Hostname() == "" {
return "127.0.0.1"
}
// Return the IP if the URL contains an address instead of a name.
if ip := net.ParseIP(parsedURL.Hostname()); ip != nil {
// etcdctl doesn't support auto-converting zero addresses into loopback addresses
if ip.Equal(net.IPv4zero) {
return "127.0.0.1"
}
parsedURL, err := url.Parse(arg)
if err != nil || parsedURL.Hostname() == "" {
break
}
// Return the IP if the URL contains an address instead of a name.
if ip := net.ParseIP(parsedURL.Hostname()); ip != nil {
// etcdctl doesn't support auto-converting zero addresses into loopback addresses
if ip.Equal(net.IPv4zero) {
return "127.0.0.1"
}
if ip.Equal(net.IPv6zero) {
return net.IPv6loopback.String()
}
return ip.String()
}
// Use the local resolver to try resolving the name within the URL.
// If the name can not be resolved, return an IPv4 loopback address.
// Otherwise, select the first valid IPv4 address.
// If the name does not resolve to an IPv4 address, select the first valid IPv6 address.
addrs, err := net.LookupIP(parsedURL.Hostname())
if err != nil {
break
}
var ip net.IP
for _, addr := range addrs {
if addr.To4() != nil {
ip = addr
break
}
if addr.To16() != nil && ip == nil {
ip = addr
}
if ip.Equal(net.IPv6zero) {
return net.IPv6loopback.String()
}
return ip.String()
}
// Use the local resolver to try resolving the name within the URL.
// If the name can not be resolved, return an IPv4 loopback address.
// Otherwise, select the first valid IPv4 address.
// If the name does not resolve to an IPv4 address, select the first valid IPv6 address.
addrs, err := net.LookupIP(parsedURL.Hostname())
if err != nil {
return "127.0.0.1"
}
var ip net.IP
for _, addr := range addrs {
if addr.To4() != nil {
ip = addr
break
}
if addr.To16() != nil && ip == nil {
ip = addr
}
}
return ip.String()
}
}
return "127.0.0.1"
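For orientation only (not part of the diff), a sketch of the split-out probe address helpers; the staticpodutil package alias, the addresses, and the "address" extra-arg key are illustrative assumptions based on the tests below.

apiHost := staticpodutil.GetAPIServerProbeAddress(&kubeadmapi.APIEndpoint{AdvertiseAddress: "10.0.0.2"}) // "10.0.0.2"
defHost := staticpodutil.GetAPIServerProbeAddress(nil)                                                   // "127.0.0.1"

cc := &kubeadmapi.ClusterConfiguration{
	ControllerManager: kubeadmapi.ControlPlaneComponent{ExtraArgs: map[string]string{"address": "10.0.0.3"}},
}
cmHost := staticpodutil.GetControllerManagerProbeAddress(cc) // "10.0.0.3"

etcdCfg := &kubeadmapi.Etcd{Local: &kubeadmapi.LocalEtcd{
	ExtraArgs: map[string]string{"listen-client-urls": "https://10.0.0.4:2379"},
}}
etcdHost := staticpodutil.GetEtcdProbeAddress(etcdCfg) // "10.0.0.4"
_, _, _, _ = apiHost, defHost, cmHost, etcdHost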
@@ -27,9 +27,7 @@ import (

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)

@@ -42,151 +40,73 @@ func TestComponentResources(t *testing.T) {
}
}

func TestComponentProbe(t *testing.T) {
var tests = []struct {
name string
cfg *kubeadmapi.InitConfiguration
component string
port int
path string
scheme v1.URIScheme
expected string
func TestGetAPIServerProbeAddress(t *testing.T) {
tests := []struct {
desc string
endpoint *kubeadmapi.APIEndpoint
expected string
}{
{
name: "default apiserver advertise address with http",
cfg: &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
AdvertiseAddress: "",
},
},
component: kubeadmconstants.KubeAPIServer,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "127.0.0.1",
desc: "nil endpoint returns 127.0.0.1",
expected: "127.0.0.1",
},
{
name: "default apiserver advertise address with https",
cfg: &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
AdvertiseAddress: "",
},
},
component: kubeadmconstants.KubeAPIServer,
port: 2,
path: "bar",
scheme: v1.URISchemeHTTPS,
expected: "127.0.0.1",
desc: "empty AdvertiseAddress endpoint returns 127.0.0.1",
endpoint: &kubeadmapi.APIEndpoint{},
expected: "127.0.0.1",
},
{
name: "valid ipv4 apiserver advertise address with http",
cfg: &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
AdvertiseAddress: "1.2.3.4",
},
desc: "filled in AdvertiseAddress endpoint returns it",
endpoint: &kubeadmapi.APIEndpoint{
AdvertiseAddress: "10.10.10.10",
},
component: kubeadmconstants.KubeAPIServer,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "1.2.3.4",
},
{
name: "valid ipv6 apiserver advertise address with http",
cfg: &kubeadmapi.InitConfiguration{
LocalAPIEndpoint: kubeadmapi.APIEndpoint{
AdvertiseAddress: "2001:db8::1",
},
},
component: kubeadmconstants.KubeAPIServer,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "2001:db8::1",
},
{
name: "valid IPv4 controller-manager probe",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
ControllerManager: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{"address": "1.2.3.4"},
},
},
},
component: kubeadmconstants.KubeControllerManager,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "1.2.3.4",
},
{
name: "valid IPv6 controller-manager probe",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
ControllerManager: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{"address": "2001:db8::1"},
},
},
},
component: kubeadmconstants.KubeControllerManager,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "2001:db8::1",
},
{
name: "valid IPv4 scheduler probe",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
Scheduler: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{"address": "1.2.3.4"},
},
},
},
component: kubeadmconstants.KubeScheduler,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "1.2.3.4",
},
{
name: "valid IPv6 scheduler probe",
cfg: &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
Scheduler: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{"address": "2001:db8::1"},
},
},
},
component: kubeadmconstants.KubeScheduler,
port: 1,
path: "foo",
scheme: v1.URISchemeHTTP,
expected: "2001:db8::1",
expected: "10.10.10.10",
},
}
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
actual := ComponentProbe(rt.cfg, rt.component, rt.port, rt.path, rt.scheme)
if actual.Handler.HTTPGet.Host != rt.expected {
t.Errorf("%s test case failed:\n\texpected: %s\n\t  actual: %s",
rt.name, rt.expected,
actual.Handler.HTTPGet.Host)

for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
actual := GetAPIServerProbeAddress(test.endpoint)
if actual != test.expected {
t.Errorf("Unexpected result from GetAPIServerProbeAddress:\n\texpected: %s\n\tactual: %s", test.expected, actual)
}
if actual.Handler.HTTPGet.Port != intstr.FromInt(rt.port) {
t.Errorf("%s test case failed:\n\texpected: %v\n\t  actual: %v",
rt.name, rt.port,
actual.Handler.HTTPGet.Port)
}
if actual.Handler.HTTPGet.Path != rt.path {
t.Errorf("%s test case failed:\n\texpected: %s\n\t  actual: %s",
rt.name, rt.path,
actual.Handler.HTTPGet.Path)
}
if actual.Handler.HTTPGet.Scheme != rt.scheme {
t.Errorf("%s test case failed:\n\texpected: %v\n\t  actual: %v",
rt.name, rt.scheme,
actual.Handler.HTTPGet.Scheme)
})
}
}

func TestGetControllerManagerProbeAddress(t *testing.T) {
tests := []struct {
desc string
cfg *kubeadmapi.ClusterConfiguration
expected string
}{
{
desc: "no controller manager extra args leads to 127.0.0.1 being used",
cfg: &kubeadmapi.ClusterConfiguration{
ControllerManager: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{},
},
},
expected: "127.0.0.1",
},
{
desc: "setting controller manager extra address arg to something acknowledges it",
cfg: &kubeadmapi.ClusterConfiguration{
ControllerManager: kubeadmapi.ControlPlaneComponent{
ExtraArgs: map[string]string{
kubeControllerManagerAddressArg: "10.10.10.10",
},
},
},
expected: "10.10.10.10",
},
}

for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
actual := GetControllerManagerProbeAddress(test.cfg)
if actual != test.expected {
t.Errorf("Unexpected result from GetControllerManagerProbeAddress:\n\texpected: %s\n\tactual: %s", test.expected, actual)
}
})
}
@@ -194,150 +114,124 @@ func TestComponentProbe(t *testing.T) {

func TestEtcdProbe(t *testing.T) {
var tests = []struct {
name string
cfg *kubeadmapi.ClusterConfiguration
component string
port int
certsDir string
cacert string
cert string
key string
expected string
name string
cfg *kubeadmapi.Etcd
port int
certsDir string
cacert string
cert string
key string
expected string
}{
{
name: "valid etcd probe using listen-client-urls IPv4 addresses",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://1.2.3.4:2379,http://4.3.2.1:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://1.2.3.4:2379,http://4.3.2.1:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsA",
cacert: "ca1",
cert: "cert1",
key: "key1",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[1.2.3.4]:1 --cacert=secretsA/ca1 --cert=secretsA/cert1 --key=secretsA/key1 get foo",
port: 1,
certsDir: "secretsA",
cacert: "ca1",
cert: "cert1",
key: "key1",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[1.2.3.4]:1 --cacert=secretsA/ca1 --cert=secretsA/cert1 --key=secretsA/key1 get foo",
},
{
name: "valid etcd probe using listen-client-urls unspecified IPv6 address",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[0:0:0:0:0:0:0:0]:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[0:0:0:0:0:0:0:0]:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
},
{
name: "valid etcd probe using listen-client-urls unspecified IPv6 address 2",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[::0:0]:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[::0:0]:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
},
{
name: "valid etcd probe using listen-client-urls unspecified IPv6 address 3",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[::]:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[::]:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
},
{
name: "valid etcd probe using listen-client-urls unspecified IPv4 address",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://1.2.3.4:2379,http://4.3.2.1:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://1.2.3.4:2379,http://4.3.2.1:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsA",
cacert: "ca1",
cert: "cert1",
key: "key1",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[1.2.3.4]:1 --cacert=secretsA/ca1 --cert=secretsA/cert1 --key=secretsA/key1 get foo",
port: 1,
certsDir: "secretsA",
cacert: "ca1",
cert: "cert1",
key: "key1",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[1.2.3.4]:1 --cacert=secretsA/ca1 --cert=secretsA/cert1 --key=secretsA/key1 get foo",
},
{
name: "valid etcd probe using listen-client-urls IPv6 addresses",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[2001:db8::1]:2379,http://[2001:db8::2]:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://[2001:db8::1]:2379,http://[2001:db8::2]:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[2001:db8::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
port: 1,
certsDir: "secretsB",
cacert: "ca2",
cert: "cert2",
key: "key2",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[2001:db8::1]:1 --cacert=secretsB/ca2 --cert=secretsB/cert2 --key=secretsB/key2 get foo",
},
{
name: "valid IPv4 etcd probe using hostname for listen-client-urls",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://localhost:2379"},
},
cfg: &kubeadmapi.Etcd{
Local: &kubeadmapi.LocalEtcd{
ExtraArgs: map[string]string{
"listen-client-urls": "http://localhost:2379"},
},
},
component: kubeadmconstants.Etcd,
port: 1,
certsDir: "secretsC",
cacert: "ca3",
cert: "cert3",
key: "key3",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:1 --cacert=secretsC/ca3 --cert=secretsC/cert3 --key=secretsC/key3 get foo",
port: 1,
certsDir: "secretsC",
cacert: "ca3",
cert: "cert3",
key: "key3",
expected: "ETCDCTL_API=3 etcdctl --endpoints=https://[127.0.0.1]:1 --cacert=secretsC/ca3 --cert=secretsC/cert3 --key=secretsC/key3 get foo",
},
}
for _, rt := range tests {
t.Run(rt.name, func(t *testing.T) {
// TODO: Make EtcdProbe accept a ClusterConfiguration object instead of InitConfiguration
initcfg := &kubeadmapi.InitConfiguration{
ClusterConfiguration: *rt.cfg,
}
actual := EtcdProbe(initcfg, rt.component, rt.port, rt.certsDir, rt.cacert, rt.cert, rt.key)
actual := EtcdProbe(rt.cfg, rt.port, rt.certsDir, rt.cacert, rt.cert, rt.key)
if actual.Handler.Exec.Command[2] != rt.expected {
t.Errorf("%s test case failed:\n\texpected: %s\n\t  actual: %s",
rt.name, rt.expected,