Merge pull request #74473 from vanduc95/cleanup-kubeadm-cont.4-20190223

kubeadm cleanup: master -> control-plane (cont.4)
Kubernetes Prow Robot 2019-02-25 15:15:30 -08:00 committed by GitHub
commit b22da83307
15 changed files with 110 additions and 110 deletions

View File

@@ -101,15 +101,15 @@ func kubeDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interfa
 }
 dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment,
-  struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
-    DeploymentName: kubeadmconstants.KubeDNSDeploymentName,
-    KubeDNSImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSKubeDNSImageName),
-    DNSMasqImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSDnsMasqNannyImageName),
-    SidecarImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSSidecarImageName),
-    DNSBindAddr: dnsBindAddr,
-    DNSProbeAddr: dnsProbeAddr,
-    DNSDomain: cfg.Networking.DNSDomain,
-    MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
+  struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, ControlPlaneTaintKey string }{
+    DeploymentName: kubeadmconstants.KubeDNSDeploymentName,
+    KubeDNSImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSKubeDNSImageName),
+    DNSMasqImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSDnsMasqNannyImageName),
+    SidecarImage: images.GetDNSImage(cfg, kubeadmconstants.KubeDNSSidecarImageName),
+    DNSBindAddr: dnsBindAddr,
+    DNSProbeAddr: dnsProbeAddr,
+    DNSDomain: cfg.Networking.DNSDomain,
+    ControlPlaneTaintKey: kubeadmconstants.LabelNodeRoleMaster,
   })
 if err != nil {
   return errors.Wrap(err, "error when parsing kube-dns deployment template")
@@ -157,10 +157,10 @@ func createKubeDNSAddon(deploymentBytes, serviceBytes []byte, client clientset.I
 func coreDNSAddon(cfg *kubeadmapi.ClusterConfiguration, client clientset.Interface) error {
   // Get the YAML manifest
-  coreDNSDeploymentBytes, err := kubeadmutil.ParseTemplate(CoreDNSDeployment, struct{ DeploymentName, Image, MasterTaintKey string }{
-    DeploymentName: kubeadmconstants.CoreDNSDeploymentName,
-    Image: images.GetDNSImage(cfg, kubeadmconstants.CoreDNSImageName),
-    MasterTaintKey: kubeadmconstants.LabelNodeRoleMaster,
+  coreDNSDeploymentBytes, err := kubeadmutil.ParseTemplate(CoreDNSDeployment, struct{ DeploymentName, Image, ControlPlaneTaintKey string }{
+    DeploymentName: kubeadmconstants.CoreDNSDeploymentName,
+    Image: images.GetDNSImage(cfg, kubeadmconstants.CoreDNSImageName),
+    ControlPlaneTaintKey: kubeadmconstants.LabelNodeRoleMaster,
   })
 if err != nil {
   return errors.Wrap(err, "error when parsing CoreDNS deployment template")
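The rename has to land on both sides of the template boundary: the anonymous struct field and the `{{ .ControlPlaneTaintKey }}` placeholder must match exactly, and a mismatch only surfaces when the template executes, not at compile time. A minimal standalone sketch of the mechanism (plain text/template; an approximation of, not a copy of, kubeadm's ParseTemplate helper):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// parseTemplate approximates what a helper like kubeadmutil.ParseTemplate
// does: parse the manifest text, then execute it against the data struct.
// A field/placeholder mismatch surfaces here as an execution error.
func parseTemplate(strtmpl string, obj interface{}) ([]byte, error) {
	tmpl, err := template.New("template").Parse(strtmpl)
	if err != nil {
		return nil, fmt.Errorf("error when parsing template: %v", err)
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, obj); err != nil {
		return nil, fmt.Errorf("error when executing template: %v", err)
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := parseTemplate(
		"- key: {{ .ControlPlaneTaintKey }}\n  effect: NoSchedule\n",
		struct{ ControlPlaneTaintKey string }{"node-role.kubernetes.io/master"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```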

View File

@@ -98,15 +98,15 @@ func TestCompileManifests(t *testing.T) {
 }{
   {
     manifest: KubeDNSDeployment,
-    data: struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
-      DeploymentName: "foo",
-      KubeDNSImage: "foo",
-      DNSMasqImage: "foo",
-      SidecarImage: "foo",
-      DNSBindAddr: "foo",
-      DNSProbeAddr: "foo",
-      DNSDomain: "foo",
-      MasterTaintKey: "foo",
+    data: struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, ControlPlaneTaintKey string }{
+      DeploymentName: "foo",
+      KubeDNSImage: "foo",
+      DNSMasqImage: "foo",
+      SidecarImage: "foo",
+      DNSBindAddr: "foo",
+      DNSProbeAddr: "foo",
+      DNSDomain: "foo",
+      ControlPlaneTaintKey: "foo",
     },
     expected: true,
   },
@@ -119,10 +119,10 @@ func TestCompileManifests(t *testing.T) {
   },
   {
     manifest: CoreDNSDeployment,
-    data: struct{ DeploymentName, Image, MasterTaintKey string }{
-      DeploymentName: "foo",
-      Image: "foo",
-      MasterTaintKey: "foo",
+    data: struct{ DeploymentName, Image, ControlPlaneTaintKey string }{
+      DeploymentName: "foo",
+      Image: "foo",
+      ControlPlaneTaintKey: "foo",
     },
     expected: true,
   },
@@ -436,23 +436,23 @@ func TestDeploymentsHaveSystemClusterCriticalPriorityClassName(t *testing.T) {
 }{
   {
     manifest: KubeDNSDeployment,
-    data: struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{
-      DeploymentName: "foo",
-      KubeDNSImage: "foo",
-      DNSMasqImage: "foo",
-      SidecarImage: "foo",
-      DNSBindAddr: "foo",
-      DNSProbeAddr: "foo",
-      DNSDomain: "foo",
-      MasterTaintKey: "foo",
+    data: struct{ DeploymentName, KubeDNSImage, DNSMasqImage, SidecarImage, DNSBindAddr, DNSProbeAddr, DNSDomain, ControlPlaneTaintKey string }{
+      DeploymentName: "foo",
+      KubeDNSImage: "foo",
+      DNSMasqImage: "foo",
+      SidecarImage: "foo",
+      DNSBindAddr: "foo",
+      DNSProbeAddr: "foo",
+      DNSDomain: "foo",
+      ControlPlaneTaintKey: "foo",
     },
   },
   {
     manifest: CoreDNSDeployment,
-    data: struct{ DeploymentName, Image, MasterTaintKey string }{
-      DeploymentName: "foo",
-      Image: "foo",
-      MasterTaintKey: "foo",
+    data: struct{ DeploymentName, Image, ControlPlaneTaintKey string }{
+      DeploymentName: "foo",
+      Image: "foo",
+      ControlPlaneTaintKey: "foo",
     },
   },
 }
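Because the test data uses anonymous struct types, every declaration site must repeat the rename; the compiler cannot connect them to the production struct, so only executing the template catches a missed site. A hypothetical, stdlib-only analogue of such a compile check (not the actual kubeadm test):

```go
package dns

import (
	"bytes"
	"testing"
	"text/template"
)

// TestRenamedFieldCompiles is a minimal analogue of TestCompileManifests:
// it proves a manifest still executes against the renamed anonymous struct.
func TestRenamedFieldCompiles(t *testing.T) {
	var tests = []struct {
		name     string
		manifest string
		data     interface{}
	}{
		{
			name:     "control-plane taint key placeholder",
			manifest: "- key: {{ .ControlPlaneTaintKey }}\n",
			data:     struct{ ControlPlaneTaintKey string }{"foo"},
		},
	}
	for _, rt := range tests {
		t.Run(rt.name, func(t *testing.T) {
			tmpl, err := template.New("t").Parse(rt.manifest)
			if err != nil {
				t.Fatalf("failed to parse manifest: %v", err)
			}
			var buf bytes.Buffer
			if err := tmpl.Execute(&buf, rt.data); err != nil {
				t.Errorf("failed to execute manifest: %v", err)
			}
		})
	}
}
```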

View File

@@ -77,7 +77,7 @@ spec:
   path: /readiness
   port: 8081
   scheme: HTTP
-# we poll on pod startup for the Kubernetes master service and
+# we poll on pod startup for the Kubernetes control-plane service and
 # only setup the /readiness HTTP server once that's available.
 initialDelaySeconds: 3
 timeoutSeconds: 5
@@ -173,7 +173,7 @@ spec:
 tolerations:
 - key: CriticalAddonsOnly
   operator: Exists
-- key: {{ .MasterTaintKey }}
+- key: {{ .ControlPlaneTaintKey }}
   effect: NoSchedule
 `
@@ -241,7 +241,7 @@ spec:
 tolerations:
 - key: CriticalAddonsOnly
   operator: Exists
-- key: {{ .MasterTaintKey }}
+- key: {{ .ControlPlaneTaintKey }}
   effect: NoSchedule
 nodeSelector:
   beta.kubernetes.io/os: linux
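The toleration rendered by `{{ .ControlPlaneTaintKey }}` corresponds to a core/v1 Toleration. A hedged sketch of the equivalent Go value; the literal key is what LabelNodeRoleMaster still expands to at this point in the rename:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Go equivalent of the YAML toleration the template renders above.
	// "node-role.kubernetes.io/master" is the taint key the renamed
	// ControlPlaneTaintKey field still points at in this PR.
	toleration := v1.Toleration{
		Key:    "node-role.kubernetes.io/master",
		Effect: v1.TaintEffectNoSchedule,
	}
	fmt.Printf("%+v\n", toleration)
}
```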

View File

@@ -33,7 +33,7 @@ data:
 clusters:
 - cluster:
     certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    server: {{ .MasterEndpoint }}
+    server: {{ .ControlPlaneEndpoint }}
   name: default
 contexts:
 - context:

View File

@@ -67,15 +67,15 @@ func EnsureProxyAddon(cfg *kubeadmapi.ClusterConfiguration, localEndpoint *kubea
 var proxyConfigMapBytes, proxyDaemonSetBytes []byte
 proxyConfigMapBytes, err = kubeadmutil.ParseTemplate(KubeProxyConfigMap19,
   struct {
-    MasterEndpoint string
-    ProxyConfig string
-    ProxyConfigMap string
-    ProxyConfigMapKey string
+    ControlPlaneEndpoint string
+    ProxyConfig string
+    ProxyConfigMap string
+    ProxyConfigMapKey string
   }{
-    MasterEndpoint: controlPlaneEndpoint,
-    ProxyConfig: prefixBytes.String(),
-    ProxyConfigMap: constants.KubeProxyConfigMap,
-    ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
+    ControlPlaneEndpoint: controlPlaneEndpoint,
+    ProxyConfig: prefixBytes.String(),
+    ProxyConfigMap: constants.KubeProxyConfigMap,
+    ProxyConfigMapKey: constants.KubeProxyConfigMapKey,
   })
 if err != nil {
   return errors.Wrap(err, "error when parsing kube-proxy configmap template")
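The controlPlaneEndpoint value interpolated here comes from kubeadmutil.GetControlPlaneEndpoint. A hedged approximation of its behavior; the real helper does more validation, and this sketch only shows the configured-endpoint fallback and IPv6-safe URL construction:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// controlPlaneEndpoint approximates kubeadmutil.GetControlPlaneEndpoint:
// prefer an explicitly configured endpoint, otherwise build one from the
// local API endpoint. net.JoinHostPort brackets IPv6 addresses correctly.
func controlPlaneEndpoint(configured, advertiseAddress string, bindPort int32) (string, error) {
	if configured != "" {
		return "https://" + configured, nil
	}
	if net.ParseIP(advertiseAddress) == nil {
		// This is the failure mode the InvalidControlPlaneEndpoint test
		// case below exercises with the address "1.2.3".
		return "", fmt.Errorf("invalid advertise address: %q", advertiseAddress)
	}
	return "https://" + net.JoinHostPort(advertiseAddress, strconv.Itoa(int(bindPort))), nil
}

func main() {
	ep, _ := controlPlaneEndpoint("", "1:2::3:4", 1234)
	fmt.Println(ep) // https://[1:2::3:4]:1234
}
```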

View File

@@ -103,12 +103,12 @@ func TestCompileManifests(t *testing.T) {
 {
   manifest: KubeProxyConfigMap19,
   data: struct {
-    MasterEndpoint, ProxyConfig, ProxyConfigMap, ProxyConfigMapKey string
+    ControlPlaneEndpoint, ProxyConfig, ProxyConfigMap, ProxyConfigMapKey string
   }{
-    MasterEndpoint: "foo",
-    ProxyConfig: " bindAddress: 0.0.0.0\n clusterCIDR: 192.168.1.1\n enableProfiling: false",
-    ProxyConfigMap: "bar",
-    ProxyConfigMapKey: "baz",
+    ControlPlaneEndpoint: "foo",
+    ProxyConfig: " bindAddress: 0.0.0.0\n clusterCIDR: 192.168.1.1\n enableProfiling: false",
+    ProxyConfigMap: "bar",
+    ProxyConfigMapKey: "baz",
   },
   expected: true,
 },
@@ -140,7 +140,7 @@ func TestEnsureProxyAddon(t *testing.T) {
 const (
   NoError SimulatedError = iota
   ServiceAccountError
-  InvalidMasterEndpoint
+  InvalidControlPlaneEndpoint
   IPv6SetBindAddress
 )
@@ -177,7 +177,7 @@ func TestEnsureProxyAddon(t *testing.T) {
 // Create a fake client and set up default test configuration
 client := clientsetfake.NewSimpleClientset()
 // TODO: Consider using a YAML file instead for this that makes it possible to specify YAML documents for the ComponentConfigs
-masterConfig := &kubeadmapiv1beta1.InitConfiguration{
+controlPlaneConfig := &kubeadmapiv1beta1.InitConfiguration{
   LocalAPIEndpoint: kubeadmapiv1beta1.APIEndpoint{
     AdvertiseAddress: "1.2.3.4",
     BindPort: 1234,
@@ -197,19 +197,19 @@ func TestEnsureProxyAddon(t *testing.T) {
   client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
     return true, nil, apierrors.NewUnauthorized("")
   })
-case InvalidMasterEndpoint:
-  masterConfig.LocalAPIEndpoint.AdvertiseAddress = "1.2.3"
+case InvalidControlPlaneEndpoint:
+  controlPlaneConfig.LocalAPIEndpoint.AdvertiseAddress = "1.2.3"
 case IPv6SetBindAddress:
-  masterConfig.LocalAPIEndpoint.AdvertiseAddress = "1:2::3:4"
-  masterConfig.Networking.PodSubnet = "2001:101::/96"
+  controlPlaneConfig.LocalAPIEndpoint.AdvertiseAddress = "1:2::3:4"
+  controlPlaneConfig.Networking.PodSubnet = "2001:101::/96"
 }
-intMaster, err := configutil.DefaultedInitConfiguration(masterConfig)
+intControlPlane, err := configutil.DefaultedInitConfiguration(controlPlaneConfig)
 if err != nil {
   t.Errorf("test failed to convert external to internal version")
   break
 }
-intMaster.ComponentConfigs.KubeProxy = &kubeproxyconfig.KubeProxyConfiguration{
+intControlPlane.ComponentConfigs.KubeProxy = &kubeproxyconfig.KubeProxyConfiguration{
   BindAddress: "",
   HealthzBindAddress: "0.0.0.0:10256",
   MetricsBindAddress: "127.0.0.1:10249",
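For reference, the fault-injection pattern the ServiceAccountError case relies on, extracted into a standalone sketch. The fake-clientset calls mirror the hunk above; the helper name is hypothetical:

```go
package proxy

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// newFailingClient returns a fake clientset whose ServiceAccount creation
// always fails, letting the test drive EnsureProxyAddon down its error path.
func newFailingClient() *clientsetfake.Clientset {
	client := clientsetfake.NewSimpleClientset()
	client.PrependReactor("create", "serviceaccounts", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, apierrors.NewUnauthorized("")
	})
	return client
}
```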
@@ -222,11 +222,11 @@ func TestEnsureProxyAddon(t *testing.T) {
   },
 }
 // Run dynamic defaulting again as we changed the internal cfg
-if err := configutil.SetInitDynamicDefaults(intMaster); err != nil {
+if err := configutil.SetInitDynamicDefaults(intControlPlane); err != nil {
   t.Errorf("test failed to set dynamic defaults: %v", err)
   break
 }
-err = EnsureProxyAddon(&intMaster.ClusterConfiguration, &intMaster.LocalAPIEndpoint, client)
+err = EnsureProxyAddon(&intControlPlane.ClusterConfiguration, &intControlPlane.LocalAPIEndpoint, client)
 // Compare actual to expected errors
 actErr := "No error"
@@ -244,17 +244,17 @@ func TestEnsureProxyAddon(t *testing.T) {
     expErr,
     actErr)
 }
-if intMaster.ComponentConfigs.KubeProxy.BindAddress != tc.expBindAddr {
+if intControlPlane.ComponentConfigs.KubeProxy.BindAddress != tc.expBindAddr {
   t.Errorf("%s test failed, expected: %s, got: %s",
     tc.name,
     tc.expBindAddr,
-    intMaster.ComponentConfigs.KubeProxy.BindAddress)
+    intControlPlane.ComponentConfigs.KubeProxy.BindAddress)
 }
-if intMaster.ComponentConfigs.KubeProxy.ClusterCIDR != tc.expClusterCIDR {
+if intControlPlane.ComponentConfigs.KubeProxy.ClusterCIDR != tc.expClusterCIDR {
   t.Errorf("%s test failed, expected: %s, got: %s",
     tc.name,
     tc.expClusterCIDR,
-    intMaster.ComponentConfigs.KubeProxy.ClusterCIDR)
+    intControlPlane.ComponentConfigs.KubeProxy.ClusterCIDR)
 }
 }
 }

View File

@@ -136,7 +136,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
   return nil, errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
 }
-masterEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
+controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
 if err != nil {
   return nil, err
 }
@@ -144,7 +144,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
 var kubeConfigSpec = map[string]*kubeConfigSpec{
   kubeadmconstants.AdminKubeConfigFileName: {
     CACert: caCert,
-    APIServer: masterEndpoint,
+    APIServer: controlPlaneEndpoint,
     ClientName: "kubernetes-admin",
     ClientCertAuth: &clientCertAuth{
       CAKey: caKey,
@@ -153,7 +153,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
   },
   kubeadmconstants.KubeletKubeConfigFileName: {
     CACert: caCert,
-    APIServer: masterEndpoint,
+    APIServer: controlPlaneEndpoint,
     ClientName: fmt.Sprintf("%s%s", kubeadmconstants.NodesUserPrefix, cfg.NodeRegistration.Name),
     ClientCertAuth: &clientCertAuth{
       CAKey: caKey,
@@ -162,7 +162,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
   },
   kubeadmconstants.ControllerManagerKubeConfigFileName: {
     CACert: caCert,
-    APIServer: masterEndpoint,
+    APIServer: controlPlaneEndpoint,
     ClientName: kubeadmconstants.ControllerManagerUser,
     ClientCertAuth: &clientCertAuth{
       CAKey: caKey,
@@ -170,7 +170,7 @@ func getKubeConfigSpecs(cfg *kubeadmapi.InitConfiguration) (map[string]*kubeConf
   },
   kubeadmconstants.SchedulerKubeConfigFileName: {
     CACert: caCert,
-    APIServer: masterEndpoint,
+    APIServer: controlPlaneEndpoint,
     ClientName: kubeadmconstants.SchedulerUser,
     ClientCertAuth: &clientCertAuth{
       CAKey: caKey,
@@ -290,14 +290,14 @@ func WriteKubeConfigWithClientCert(out io.Writer, cfg *kubeadmapi.InitConfigurat
   return errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
 }
-masterEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
+controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
 if err != nil {
   return err
 }
 spec := &kubeConfigSpec{
   ClientName: clientName,
-  APIServer: masterEndpoint,
+  APIServer: controlPlaneEndpoint,
   CACert: caCert,
   ClientCertAuth: &clientCertAuth{
     CAKey: caKey,
@@ -317,14 +317,14 @@ func WriteKubeConfigWithToken(out io.Writer, cfg *kubeadmapi.InitConfiguration,
   return errors.Wrap(err, "couldn't create a kubeconfig; the CA files couldn't be loaded")
 }
-masterEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
+controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
 if err != nil {
   return err
 }
 spec := &kubeConfigSpec{
   ClientName: clientName,
-  APIServer: masterEndpoint,
+  APIServer: controlPlaneEndpoint,
   CACert: caCert,
   TokenAuth: &tokenAuth{
     Token: token,
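Every kubeConfigSpec above ultimately materializes as a kubeconfig whose cluster server is the resolved control-plane endpoint. A hedged sketch of that final shape using client-go's clientcmd API (illustrative only; kubeadm's real writers live in its kubeconfig utilities):

```go
package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// buildKubeConfig shows, in miniature, what a kubeConfigSpec resolves to:
// a kubeconfig whose cluster points at the control-plane endpoint.
func buildKubeConfig(controlPlaneEndpoint, clusterName, userName string, caCert []byte) *clientcmdapi.Config {
	return &clientcmdapi.Config{
		Clusters: map[string]*clientcmdapi.Cluster{
			clusterName: {
				Server:                   controlPlaneEndpoint, // e.g. "https://10.0.0.1:6443"
				CertificateAuthorityData: caCert,
			},
		},
		Contexts: map[string]*clientcmdapi.Context{
			"default": {Cluster: clusterName, AuthInfo: userName},
		},
		CurrentContext: "default",
	}
}

func main() {
	cfg := buildKubeConfig("https://10.0.0.1:6443", "kubernetes", "kubernetes-admin", nil)
	fmt.Println(cfg.Clusters["kubernetes"].Server)
}
```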

View File

@@ -162,11 +162,11 @@ func TestGetKubeConfigSpecs(t *testing.T) {
 }
 // Asserts InitConfiguration values injected into spec
-masterEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
+controlPlaneEndpoint, err := kubeadmutil.GetControlPlaneEndpoint(cfg.ControlPlaneEndpoint, &cfg.LocalAPIEndpoint)
 if err != nil {
   t.Error(err)
 }
-if spec.APIServer != masterEndpoint {
+if spec.APIServer != controlPlaneEndpoint {
   t.Errorf("getKubeConfigSpecs didn't injected cfg.APIServer endpoint into spec for %s", assertion.kubeConfigFile)
 }

View File

@@ -39,7 +39,7 @@ func MarkControlPlane(client clientset.Interface, controlPlaneName string, taint
 }
 return apiclient.PatchNode(client, controlPlaneName, func(n *v1.Node) {
-  markMasterNode(n, taints)
+  markControlPlaneNode(n, taints)
 })
 }
@@ -53,7 +53,7 @@ func taintExists(taint v1.Taint, taints []v1.Taint) bool {
 return false
 }
-func markMasterNode(n *v1.Node, taints []v1.Taint) {
+func markControlPlaneNode(n *v1.Node, taints []v1.Taint) {
 n.ObjectMeta.Labels[constants.LabelNodeRoleMaster] = ""
 for _, nt := range n.Spec.Taints {
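The hunk is truncated by the viewer; a hedged reconstruction of the full mutation, labeling the node and then merging in any missing taints. The loop body is inferred from the visible signatures, not copied from upstream:

```go
package markcontrolplane

import (
	v1 "k8s.io/api/core/v1"
)

// taintExists reports whether taint is already present in taints,
// mirroring the helper shown in the diff above (body inferred).
func taintExists(taint v1.Taint, taints []v1.Taint) bool {
	for _, t := range taints {
		if t.MatchTaint(&taint) {
			return true
		}
	}
	return false
}

// markControlPlaneNode sets the node-role label and appends any desired
// taints the node does not carry yet; an inferred completion of the
// truncated hunk, not the verbatim upstream body.
func markControlPlaneNode(n *v1.Node, taints []v1.Taint) {
	n.ObjectMeta.Labels["node-role.kubernetes.io/master"] = ""
	for _, t := range taints {
		if !taintExists(t, n.Spec.Taints) {
			n.Spec.Taints = append(n.Spec.Taints, t)
		}
	}
}
```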

View File

@@ -40,18 +40,18 @@ func GetDefaultMutators() map[string][]PodSpecMutatorFunc {
 return map[string][]PodSpecMutatorFunc{
   kubeadmconstants.KubeAPIServer: {
     addNodeSelectorToPodSpec,
-    setMasterTolerationOnPodSpec,
+    setControlPlaneTolerationOnPodSpec,
     setRightDNSPolicyOnPodSpec,
     setHostIPOnPodSpec,
   },
   kubeadmconstants.KubeControllerManager: {
     addNodeSelectorToPodSpec,
-    setMasterTolerationOnPodSpec,
+    setControlPlaneTolerationOnPodSpec,
     setRightDNSPolicyOnPodSpec,
   },
   kubeadmconstants.KubeScheduler: {
     addNodeSelectorToPodSpec,
-    setMasterTolerationOnPodSpec,
+    setControlPlaneTolerationOnPodSpec,
     setRightDNSPolicyOnPodSpec,
   },
 }
@@ -82,7 +82,7 @@ func mutatePodSpec(mutators map[string][]PodSpecMutatorFunc, name string, podSpe
 }
 }
-// addNodeSelectorToPodSpec makes Pod require to be scheduled on a node marked with the master label
+// addNodeSelectorToPodSpec makes Pod require to be scheduled on a node marked with the control-plane label
 func addNodeSelectorToPodSpec(podSpec *v1.PodSpec) {
 if podSpec.NodeSelector == nil {
   podSpec.NodeSelector = map[string]string{kubeadmconstants.LabelNodeRoleMaster: ""}
@@ -92,8 +92,8 @@ func addNodeSelectorToPodSpec(podSpec *v1.PodSpec) {
 podSpec.NodeSelector[kubeadmconstants.LabelNodeRoleMaster] = ""
 }
-// setMasterTolerationOnPodSpec makes the Pod tolerate the master taint
-func setMasterTolerationOnPodSpec(podSpec *v1.PodSpec) {
+// setControlPlaneTolerationOnPodSpec makes the Pod tolerate the control-plane taint
+func setControlPlaneTolerationOnPodSpec(podSpec *v1.PodSpec) {
 if podSpec.Tolerations == nil {
   podSpec.Tolerations = []v1.Toleration{kubeadmconstants.ControlPlaneToleration}
   return
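This hunk also cuts off at the early return. A hedged completion of the pattern: when a toleration list already exists, the control-plane toleration is appended instead of replacing the slice. The branch shown is from the diff; the rest is inferred, and controlPlaneToleration stands in for kubeadmconstants.ControlPlaneToleration:

```go
package selfhosting

import (
	v1 "k8s.io/api/core/v1"
)

// controlPlaneToleration is a stand-in for kubeadmconstants.ControlPlaneToleration.
var controlPlaneToleration = v1.Toleration{
	Key:    "node-role.kubernetes.io/master",
	Effect: v1.TaintEffectNoSchedule,
}

// setControlPlaneTolerationOnPodSpec makes the Pod tolerate the
// control-plane taint; an inferred completion of the truncated hunk.
func setControlPlaneTolerationOnPodSpec(podSpec *v1.PodSpec) {
	if podSpec.Tolerations == nil {
		podSpec.Tolerations = []v1.Toleration{controlPlaneToleration}
		return
	}
	podSpec.Tolerations = append(podSpec.Tolerations, controlPlaneToleration)
}
```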

View File

@@ -157,7 +157,7 @@ func TestAddNodeSelectorToPodSpec(t *testing.T) {
 }
 }
-func TestSetMasterTolerationOnPodSpec(t *testing.T) {
+func TestSetControlPlaneTolerationOnPodSpec(t *testing.T) {
 var tests = []struct {
   name string
   podSpec *v1.PodSpec
@@ -190,10 +190,10 @@ func TestSetMasterTolerationOnPodSpec(t *testing.T) {
 for _, rt := range tests {
   t.Run(rt.name, func(t *testing.T) {
-    setMasterTolerationOnPodSpec(rt.podSpec)
+    setControlPlaneTolerationOnPodSpec(rt.podSpec)
     if !reflect.DeepEqual(*rt.podSpec, rt.expected) {
-      t.Errorf("failed setMasterTolerationOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
+      t.Errorf("failed setControlPlaneTolerationOnPodSpec:\nexpected:\n%v\nsaw:\n%v", rt.expected, *rt.podSpec)
     }
   })
 }

View File

@@ -56,7 +56,7 @@ func (c *healthCheck) Name() string {
 // CheckClusterHealth makes sure:
 // - the API /healthz endpoint is healthy
-// - all master Nodes are Ready
+// - all control-plane Nodes are Ready
 // - (if self-hosted) that there are DaemonSets with at least one Pod for all control plane components
 // - (if static pod-hosted) that all required Static Pod manifests exist on disk
 func CheckClusterHealth(client clientset.Interface, ignoreChecksErrors sets.String) error {
@@ -69,9 +69,9 @@ func CheckClusterHealth(client clientset.Interface, ignoreChecksErrors sets.Stri
   f: apiServerHealthy,
 },
 &healthCheck{
-  name: "MasterNodesReady",
+  name: "ControlPlaneNodesReady",
   client: client,
-  f: masterNodesReady,
+  f: controlPlaneNodesReady,
 },
 // TODO: Add a check for ComponentStatuses here?
 }
@@ -100,25 +100,25 @@ func apiServerHealthy(client clientset.Interface) error {
 return nil
 }
-// masterNodesReady checks whether all master Nodes in the cluster are in the Running state
-func masterNodesReady(client clientset.Interface) error {
+// controlPlaneNodesReady checks whether all control-plane Nodes in the cluster are in the Running state
+func controlPlaneNodesReady(client clientset.Interface) error {
 selector := labels.SelectorFromSet(labels.Set(map[string]string{
   constants.LabelNodeRoleMaster: "",
 }))
-masters, err := client.CoreV1().Nodes().List(metav1.ListOptions{
+controlPlanes, err := client.CoreV1().Nodes().List(metav1.ListOptions{
   LabelSelector: selector.String(),
 })
 if err != nil {
   return errors.Wrap(err, "couldn't list control-planes in cluster")
 }
-if len(masters.Items) == 0 {
+if len(controlPlanes.Items) == 0 {
   return errors.New("failed to find any nodes with a control-plane role")
 }
-notReadyMasters := getNotReadyNodes(masters.Items)
-if len(notReadyMasters) != 0 {
-  return errors.Errorf("there are NotReady control-planes in the cluster: %v", notReadyMasters)
+notReadyControlPlanes := getNotReadyNodes(controlPlanes.Items)
+if len(notReadyControlPlanes) != 0 {
+  return errors.Errorf("there are NotReady control-planes in the cluster: %v", notReadyControlPlanes)
 }
 return nil
 }
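The readiness check is a plain label-selector list. A self-contained sketch of the same pattern; the context-free List signature matches the client-go vintage used on this branch, and the helper name is hypothetical:

```go
package upgrade

import (
	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
)

// listControlPlaneNodes fetches the nodes carrying the (still "master"-named)
// node-role label that controlPlaneNodesReady filters on, returning their names.
func listControlPlaneNodes(client clientset.Interface) ([]string, error) {
	selector := labels.SelectorFromSet(labels.Set{
		"node-role.kubernetes.io/master": "",
	})
	nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list control-plane nodes")
	}
	names := make([]string, 0, len(nodes.Items))
	for _, n := range nodes.Items {
		names = append(names, n.Name)
	}
	return names, nil
}
```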

View File

@@ -49,7 +49,7 @@ import (
 var expiry = 180 * 24 * time.Hour
 // PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
-// Note that the markmaster phase is left out, not needed, and no token is created as that doesn't belong to the upgrade
+// Note that the mark-control-plane phase is left out, not needed, and no token is created as that doesn't belong to the upgrade
 func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, newK8sVer *version.Version, dryRun bool) error {
 errs := []error{}
@@ -207,9 +207,9 @@ func writeKubeletConfigFiles(client clientset.Interface, cfg *kubeadmapi.InitCon
 envFilePath := filepath.Join(kubeadmconstants.KubeletRunDirectory, kubeadmconstants.KubeletEnvFileName)
 if _, err := os.Stat(envFilePath); os.IsNotExist(err) {
-  // Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the master,
-  // as we handle that ourselves in the markmaster phase
-  // TODO: Maybe we want to do that some time in the future, in order to remove some logic from the markmaster phase?
+  // Write env file with flags for the kubelet to use. We do not need to write the --register-with-taints for the control-plane,
+  // as we handle that ourselves in the mark-control-plane phase
+  // TODO: Maybe we want to do that some time in the future, in order to remove some logic from the mark-control-plane phase?
   if err := kubeletphase.WriteKubeletDynamicEnvFile(&cfg.ClusterConfiguration, &cfg.NodeRegistration, false, kubeletDir); err != nil {
     errs = append(errs, errors.Wrap(err, "error writing a dynamic environment file for the kubelet"))
   }
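The os.Stat guard above is a create-if-absent pattern: the env file is only written when missing, so user edits survive upgrades. A hedged, hypothetical distillation of that guard (not kubeadm's WriteKubeletDynamicEnvFile):

```go
package upgrade

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeIfAbsent mirrors the guard above: create the file only when it does
// not exist yet, propagate any other stat error, and never overwrite.
func writeIfAbsent(dir, name string, contents []byte) error {
	path := filepath.Join(dir, name)
	if _, err := os.Stat(path); !os.IsNotExist(err) {
		return err // nil when the file already exists; a real error otherwise
	}
	return ioutil.WriteFile(path, contents, 0644)
}
```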

View File

@@ -42,7 +42,7 @@ type Prepuller interface {
 DeleteFunc(string) error
 }
-// DaemonSetPrepuller makes sure the control-plane images are available on all masters
+// DaemonSetPrepuller makes sure the control-plane images are available on all control-planes
 type DaemonSetPrepuller struct {
 client clientset.Interface
 cfg *kubeadmapi.ClusterConfiguration

View File

@@ -120,11 +120,11 @@ func TestUploadConfiguration(t *testing.T) {
 }
 }
 if tt.verifyResult {
-  masterCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{})
+  controlPlaneCfg, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(kubeadmconstants.KubeadmConfigConfigMap, metav1.GetOptions{})
   if err != nil {
     t2.Fatalf("Fail to query ConfigMap error = %v", err)
   }
-  configData := masterCfg.Data[kubeadmconstants.ClusterConfigurationConfigMapKey]
+  configData := controlPlaneCfg.Data[kubeadmconstants.ClusterConfigurationConfigMapKey]
   if configData == "" {
     t2.Fatal("Fail to find ClusterConfigurationConfigMapKey key")
   }
@@ -138,7 +138,7 @@ func TestUploadConfiguration(t *testing.T) {
   t2.Errorf("the initial and decoded ClusterConfiguration didn't match")
 }
-statusData := masterCfg.Data[kubeadmconstants.ClusterStatusConfigMapKey]
+statusData := controlPlaneCfg.Data[kubeadmconstants.ClusterStatusConfigMapKey]
 if statusData == "" {
   t2.Fatal("failed to find ClusterStatusConfigMapKey key")
 }