mirror of https://github.com/k3s-io/k3s
Merge pull request #65151 from luxas/kubeadm_dryrun_bugs
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix kubeadm init/upgrade --dry-run mode

**What this PR does / why we need it**: Split out from https://github.com/kubernetes/kubernetes/pull/65104. Fixes `kubeadm init --dry-run` and `kubeadm upgrade apply [version] --dry-run`.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*: Fixes #

**Special notes for your reviewer**:

**Release note**:
```release-note
NONE
```

@kubernetes/sig-cluster-lifecycle-pr-reviews
commit 410941b40c
@@ -260,11 +260,15 @@ func NewInit(cfgPath string, externalcfg *kubeadmapiv1alpha2.MasterConfiguration
 		return nil, err
 	}
 
-	fmt.Println("[preflight/images] Pulling images required for setting up a Kubernetes cluster")
-	fmt.Println("[preflight/images] This might take a minute or two, depending on the speed of your internet connection")
-	fmt.Println("[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'")
-	if err := preflight.RunPullImagesCheck(utilsexec.New(), cfg, ignorePreflightErrors); err != nil {
-		return nil, err
+	if !dryRun {
+		fmt.Println("[preflight/images] Pulling images required for setting up a Kubernetes cluster")
+		fmt.Println("[preflight/images] This might take a minute or two, depending on the speed of your internet connection")
+		fmt.Println("[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'")
+		if err := preflight.RunPullImagesCheck(utilsexec.New(), cfg, ignorePreflightErrors); err != nil {
+			return nil, err
+		}
+	} else {
+		fmt.Println("[preflight/images] Would pull the required images (like 'kubeadm config images pull')")
 	}
 
 	return &Init{cfg: cfg, skipTokenPrint: skipTokenPrint, dryRun: dryRun, ignorePreflightErrors: ignorePreflightErrors}, nil
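The change above gates the preflight image pull on `dryRun`: pulls only happen for a real `kubeadm init`, while `--dry-run` just reports what would be pulled. A minimal, self-contained sketch of that gating pattern (the `pullImages` helper and the image list below are hypothetical stand-ins, not the kubeadm preflight API):

```go
package main

import "fmt"

// pullImages is a hypothetical stand-in for preflight.RunPullImagesCheck.
func pullImages(images []string) error {
	for _, img := range images {
		fmt.Printf("pulling %s\n", img)
	}
	return nil
}

// preparePreflightImages pulls the control-plane images unless dryRun is set,
// in which case it only reports what would happen.
func preparePreflightImages(images []string, dryRun bool) error {
	if dryRun {
		fmt.Println("[preflight/images] Would pull the required images (like 'kubeadm config images pull')")
		return nil
	}
	fmt.Println("[preflight/images] Pulling images required for setting up a Kubernetes cluster")
	return pullImages(images)
}

func main() {
	_ = preparePreflightImages([]string{"k8s.gcr.io/kube-apiserver:v1.11.0"}, true)
}
```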
@@ -292,6 +292,10 @@ func createDNSService(dnsService *v1.Service, serviceBytes []byte, client client
 // translateStubDomainOfKubeDNSToProxyCoreDNS translates StubDomain Data in kube-dns ConfigMap
 // in the form of Proxy for the CoreDNS Corefile.
 func translateStubDomainOfKubeDNSToProxyCoreDNS(dataField string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
+	if kubeDNSConfigMap == nil {
+		return "", nil
+	}
+
 	if proxy, ok := kubeDNSConfigMap.Data[dataField]; ok {
 		stubDomainData := make(map[string][]string)
 		err := json.Unmarshal([]byte(proxy), &stubDomainData)
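For context on what `translateStubDomainOfKubeDNSToProxyCoreDNS` consumes: kube-dns stores stub domains as a JSON map of domain to nameserver list, and CoreDNS expresses the same thing as per-zone proxy blocks. A rough, self-contained sketch of that translation follows; the helper name and output format are illustrative, not the exact Corefile kubeadm generates:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// stubDomainsToProxyStanzas converts kube-dns stubDomains JSON (domain -> nameservers)
// into CoreDNS-style per-zone proxy blocks. Hypothetical helper for illustration only.
func stubDomainsToProxyStanzas(data string) (string, error) {
	stubDomainData := make(map[string][]string)
	if err := json.Unmarshal([]byte(data), &stubDomainData); err != nil {
		return "", err
	}
	var b strings.Builder
	for domain, nameservers := range stubDomainData {
		fmt.Fprintf(&b, "%s:53 {\n    proxy . %s\n}\n", domain, strings.Join(nameservers, " "))
	}
	return b.String(), nil
}

func main() {
	out, err := stubDomainsToProxyStanzas(`{"acme.local": ["1.2.3.4"]}`)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
```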
@@ -328,6 +332,10 @@ func translateStubDomainOfKubeDNSToProxyCoreDNS(dataField string, kubeDNSConfigM
 // translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS translates UpstreamNameServer Data in kube-dns ConfigMap
 // in the form of Proxy for the CoreDNS Corefile.
 func translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS(dataField string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
+	if kubeDNSConfigMap == nil {
+		return "", nil
+	}
+
 	if upstreamValues, ok := kubeDNSConfigMap.Data[dataField]; ok {
 		var upstreamProxyIP []string
 
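Similarly, kube-dns stores upstream nameservers as a JSON array of IPs. A small sketch of parsing that field, under the assumption that the result feeds a single CoreDNS proxy/forward directive (names are illustrative, not the kubeadm implementation):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// upstreamNameserversToProxy parses the kube-dns upstreamNameservers JSON array and
// joins it into a space-separated list, roughly what a CoreDNS proxy directive expects.
func upstreamNameserversToProxy(data string) (string, error) {
	var upstreamProxyIP []string
	if err := json.Unmarshal([]byte(data), &upstreamProxyIP); err != nil {
		return "", err
	}
	return strings.Join(upstreamProxyIP, " "), nil
}

func main() {
	out, _ := upstreamNameserversToProxy(`["8.8.8.8", "8.8.4.4"]`)
	fmt.Println(out) // 8.8.8.8 8.8.4.4
}
```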
@@ -345,6 +353,10 @@ func translateUpstreamNameServerOfKubeDNSToUpstreamProxyCoreDNS(dataField string
 // translateFederationsofKubeDNSToCoreDNS translates Federations Data in kube-dns ConfigMap
 // to Federation for CoreDNS Corefile.
 func translateFederationsofKubeDNSToCoreDNS(dataField, coreDNSDomain string, kubeDNSConfigMap *v1.ConfigMap) (string, error) {
+	if kubeDNSConfigMap == nil {
+		return "", nil
+	}
+
 	if federation, ok := kubeDNSConfigMap.Data[dataField]; ok {
 		var (
 			federationStanza []interface{}
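All three translate helpers above gain the same guard: under `--dry-run` the kube-dns ConfigMap may simply not exist, so a nil `*v1.ConfigMap` must yield an empty translation rather than a panic. A dependency-free sketch of that guard, using a stripped-down stand-in for the ConfigMap type:

```go
package main

import "fmt"

// configMap is a stripped-down stand-in for the Kubernetes v1.ConfigMap type,
// used only to illustrate the nil guard added to the translate helpers above.
type configMap struct {
	Data map[string]string
}

// translateField returns the raw field value, or "" when the ConfigMap is absent.
// Under --dry-run the kube-dns ConfigMap may not exist, so a nil pointer must not panic.
func translateField(dataField string, cm *configMap) (string, error) {
	if cm == nil {
		return "", nil
	}
	return cm.Data[dataField], nil
}

func main() {
	// No ConfigMap at all (e.g. a dry-run against an empty fake client): no panic, empty result.
	out, err := translateField("stubDomains", nil)
	fmt.Printf("%q %v\n", out, err)
}
```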
@@ -118,20 +118,9 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 		errs = append(errs, err)
 	}
 
-	certAndKeyDir := kubeadmapiv1alpha2.DefaultCertificatesDir
-	shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir)
-	// Don't fail the upgrade phase if failing to determine to backup kube-apiserver cert and key.
-	if err != nil {
-		fmt.Printf("[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key: %v", err)
-	} else if shouldBackup {
-		// TODO: Make sure this works in dry-run mode as well
-		// Don't fail the upgrade phase if failing to backup kube-apiserver cert and key.
-		if err := backupAPIServerCertAndKey(certAndKeyDir); err != nil {
-			fmt.Printf("[postupgrade] WARNING: failed to backup kube-apiserver cert and key: %v", err)
-		}
-		if err := certsphase.CreateAPIServerCertAndKeyFiles(cfg); err != nil {
-			errs = append(errs, err)
-		}
-	}
+	// Rotate the kube-apiserver cert and key if needed
+	if err := backupAPIServerCertIfNeeded(cfg, dryRun); err != nil {
+		errs = append(errs, err)
+	}
 
 	// Upgrade kube-dns/CoreDNS and kube-proxy
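The cert-backup logic is pulled out of `PerformPostUpgradeTasks` into `backupAPIServerCertIfNeeded(cfg, dryRun)` (shown further down), and its error is collected like every other step rather than aborting the phase. A compact sketch of that collect-and-aggregate shape, using the standard library's `errors.Join` in place of kubeadm's `errors.NewAggregate` and a stubbed-out backup step:

```go
package main

import (
	"errors"
	"fmt"
)

// backupAPIServerCertIfNeeded is stubbed out here so the aggregation pattern
// used by PerformPostUpgradeTasks can be shown in isolation.
func backupAPIServerCertIfNeeded(dryRun bool) error {
	if dryRun {
		fmt.Println("[postupgrade] Would rotate the API server certificate and key.")
	}
	return nil
}

// postUpgrade mirrors the collect-errors-and-aggregate shape of PerformPostUpgradeTasks:
// individual steps append to errs instead of aborting the whole phase.
func postUpgrade(dryRun bool) error {
	var errs []error
	if err := backupAPIServerCertIfNeeded(dryRun); err != nil {
		errs = append(errs, err)
	}
	return errors.Join(errs...)
}

func main() {
	fmt.Println(postUpgrade(true))
}
```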
@@ -139,10 +128,8 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 		errs = append(errs, err)
 	}
 	// Remove the old DNS deployment if a new DNS service is now used (kube-dns to CoreDNS or vice versa)
-	if !dryRun { // TODO: Remove dryrun here and make it work
-		if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg, client); err != nil {
-			errs = append(errs, err)
-		}
-	}
+	if err := removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg, client, dryRun); err != nil {
+		errs = append(errs, err)
+	}
 
 	if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
@@ -151,7 +138,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 	return errors.NewAggregate(errs)
 }
 
-func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
+func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, dryRun bool) error {
 	return apiclient.TryRunCommand(func() error {
 		installedDeploymentName := kubeadmconstants.KubeDNS
 		deploymentToDelete := kubeadmconstants.CoreDNS
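`removeOldDNSDeploymentIfAnotherDNSIsUsed` now receives `dryRun` directly and keeps running inside `apiclient.TryRunCommand`, kubeadm's retry wrapper. A simplified, assumption-laden stand-in for that retry behavior (the real helper's signature differs):

```go
package main

import (
	"fmt"
	"time"
)

// tryRunCommand is a simplified stand-in for kubeadm's apiclient.TryRunCommand:
// it retries f a few times with a small backoff, returning the last error on exhaustion.
func tryRunCommand(f func() error, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("command failed after %d attempts: %v", attempts, err)
}

func main() {
	calls := 0
	err := tryRunCommand(func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("the DNS deployment isn't ready yet")
		}
		return nil
	}, 5)
	fmt.Println(calls, err)
}
```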
@@ -160,14 +147,21 @@ func removeOldDNSDeploymentIfAnotherDNSIsUsed(cfg *kubeadmapi.MasterConfiguratio
 			installedDeploymentName = kubeadmconstants.CoreDNS
 			deploymentToDelete = kubeadmconstants.KubeDNS
 		}
-		dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(installedDeploymentName, metav1.GetOptions{})
-		if err != nil {
-			return err
-		}
-		if dnsDeployment.Status.ReadyReplicas == 0 {
-			return fmt.Errorf("the DNS deployment isn't ready yet")
-		}
-		err = apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, deploymentToDelete)
+
+		// If we're dry-running, we don't need to wait for the new DNS addon to become ready
+		if !dryRun {
+			dnsDeployment, err := client.AppsV1().Deployments(metav1.NamespaceSystem).Get(installedDeploymentName, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			if dnsDeployment.Status.ReadyReplicas == 0 {
+				return fmt.Errorf("the DNS deployment isn't ready yet")
+			}
+		}
+
+		// We don't want to wait for the DNS deployment above to become ready when dryrunning (as it never will)
+		// but here we should execute the DELETE command against the dryrun clientset, as it will only be logged
+		err := apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, deploymentToDelete)
 		if err != nil && !apierrors.IsNotFound(err) {
 			return err
 		}
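The reshaped body above only waits for the new DNS deployment's `ReadyReplicas` when not dry-running, but it always issues the delete, because a dry-run clientset merely logs the DELETE instead of executing it. A minimal sketch of that control flow against a hypothetical interface (not the real clientset API):

```go
package main

import "fmt"

// deploymentClient is a hypothetical, minimal interface standing in for the parts of
// the clientset that removeOldDNSDeploymentIfAnotherDNSIsUsed needs.
type deploymentClient interface {
	ReadyReplicas(name string) (int32, error)
	Delete(name string) error
}

// removeOldDNSDeployment mirrors the control flow above: only wait for the new DNS
// deployment to become ready when not dry-running, but always issue the delete,
// since a dry-run clientset merely logs it.
func removeOldDNSDeployment(c deploymentClient, installed, toDelete string, dryRun bool) error {
	if !dryRun {
		ready, err := c.ReadyReplicas(installed)
		if err != nil {
			return err
		}
		if ready == 0 {
			return fmt.Errorf("the DNS deployment isn't ready yet")
		}
	}
	return c.Delete(toDelete)
}

// loggingClient pretends every deployment is ready and logs deletes, loosely like a dry-run clientset.
type loggingClient struct{}

func (loggingClient) ReadyReplicas(string) (int32, error) { return 1, nil }
func (loggingClient) Delete(name string) error {
	fmt.Printf("[dryrun] Would delete deployment %q\n", name)
	return nil
}

func main() {
	_ = removeOldDNSDeployment(loggingClient{}, "coredns", "kube-dns", true)
}
```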
@@ -189,6 +183,32 @@ func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConf
 	return nil
 }
 
+func backupAPIServerCertIfNeeded(cfg *kubeadmapi.MasterConfiguration, dryRun bool) error {
+	certAndKeyDir := kubeadmapiv1alpha2.DefaultCertificatesDir
+	shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir)
+	if err != nil {
+		// Don't fail the upgrade phase if failing to determine to backup kube-apiserver cert and key.
+		return fmt.Errorf("[postupgrade] WARNING: failed to determine to backup kube-apiserver cert and key: %v", err)
+	}
+
+	if !shouldBackup {
+		return nil
+	}
+
+	// If dry-running, just say that this would happen to the user and exit
+	if dryRun {
+		fmt.Println("[postupgrade] Would rotate the API server certificate and key.")
+		return nil
+	}
+
+	// Don't fail the upgrade phase if failing to backup kube-apiserver cert and key, just continue rotating the cert
+	// TODO: We might want to reconsider this choice.
+	if err := backupAPIServerCertAndKey(certAndKeyDir); err != nil {
+		fmt.Printf("[postupgrade] WARNING: failed to backup kube-apiserver cert and key: %v", err)
+	}
+	return certsphase.CreateAPIServerCertAndKeyFiles(cfg)
+}
+
 // getWaiter gets the right waiter implementation for the right occasion
 // TODO: Consolidate this with what's in init.go?
 func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
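The new `backupAPIServerCertIfNeeded` orders its decisions as: determine whether a backup is needed, skip if not, print a "would rotate" message and return under `--dry-run`, otherwise back up (best effort) and rotate. A tiny decision-table sketch of those modes, with hypothetical stubs in place of the real cert phase calls:

```go
package main

import "fmt"

// rotate is a hypothetical stand-in for certsphase.CreateAPIServerCertAndKeyFiles.
func rotate() error { return nil }

// certAction reports what backupAPIServerCertIfNeeded-style logic does for each mode.
func certAction(shouldBackup, dryRun bool) string {
	switch {
	case !shouldBackup:
		return "nothing to do"
	case dryRun:
		return "would rotate the API server certificate and key"
	default:
		_ = rotate()
		return "backed up and rotated the API server certificate and key"
	}
}

func main() {
	for _, tc := range []struct{ shouldBackup, dryRun bool }{
		{false, false}, {true, true}, {true, false},
	} {
		fmt.Printf("shouldBackup=%v dryRun=%v -> %s\n", tc.shouldBackup, tc.dryRun, certAction(tc.shouldBackup, tc.dryRun))
	}
}
```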