Merge pull request #56513 from luxas/kubeadm_v19_nits

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

kubeadm: Fix bugs in the codebase related to upgrades/downgrades

**What this PR does / why we need it**:

Fixes bugs related to the upgrade / downgrade paths I found in the codebase
Hides the HighAvailability flag from the help text, as that feature didn't fully make it in
Fixes some small things in defaulting and the config JSON schema
Fixes a bug when cloud-config is referenced but not mounted into the static pod

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
@kubernetes/sig-cluster-lifecycle-pr-reviews
pull/6/head
Kubernetes Submit Queue 2017-12-01 21:46:57 -08:00 committed by GitHub
commit 8c1ee761d2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 165 additions and 89 deletions

View File

@ -156,12 +156,9 @@ func SetDefaults_NodeConfiguration(obj *NodeConfiguration) {
} }
} }
// SetDefaultsEtcdSelfHosted sets defaults for self-hosted etcd // SetDefaultsEtcdSelfHosted sets defaults for self-hosted etcd if used
func SetDefaultsEtcdSelfHosted(obj *MasterConfiguration) { func SetDefaultsEtcdSelfHosted(obj *MasterConfiguration) {
if obj.Etcd.SelfHosted == nil { if obj.Etcd.SelfHosted != nil {
obj.Etcd.SelfHosted = &SelfHostedEtcd{}
}
if obj.Etcd.SelfHosted.ClusterServiceName == "" { if obj.Etcd.SelfHosted.ClusterServiceName == "" {
obj.Etcd.SelfHosted.ClusterServiceName = DefaultEtcdClusterServiceName obj.Etcd.SelfHosted.ClusterServiceName = DefaultEtcdClusterServiceName
} }
@ -178,6 +175,7 @@ func SetDefaultsEtcdSelfHosted(obj *MasterConfiguration) {
obj.Etcd.SelfHosted.CertificatesDir = DefaultEtcdCertDir obj.Etcd.SelfHosted.CertificatesDir = DefaultEtcdCertDir
} }
} }
}
// SetDefaults_KubeletConfiguration assigns default values to kubelet // SetDefaults_KubeletConfiguration assigns default values to kubelet
func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) { func SetDefaults_KubeletConfiguration(obj *MasterConfiguration) {

View File

@ -147,7 +147,7 @@ type NodeConfiguration struct {
// KubeletConfiguration contains elements describing initial remote configuration of kubelet // KubeletConfiguration contains elements describing initial remote configuration of kubelet
type KubeletConfiguration struct { type KubeletConfiguration struct {
BaseConfig *kubeletconfigv1alpha1.KubeletConfiguration `json:"baseConfig"` BaseConfig *kubeletconfigv1alpha1.KubeletConfiguration `json:"baseConfig,omitempty"`
} }
// HostPathMount contains elements describing volumes that are mounted from the // HostPathMount contains elements describing volumes that are mounted from the

View File

@ -433,7 +433,7 @@ func (i *Init) Run(out io.Writer) error {
// Temporary control plane is up, now we create our self hosted control // Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests: // plane components and remove the static manifests:
fmt.Println("[self-hosted] Creating self-hosted control plane.") fmt.Println("[self-hosted] Creating self-hosted control plane.")
if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter); err != nil { if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter, i.dryRun); err != nil {
return fmt.Errorf("error creating self hosted control plane: %v", err) return fmt.Errorf("error creating self hosted control plane: %v", err)
} }
} }

View File

@ -103,7 +103,7 @@ func getSelfhostingSubCommand() *cobra.Command {
// Converts the Static Pod-hosted control plane into a self-hosted one // Converts the Static Pod-hosted control plane into a self-hosted one
waiter := apiclient.NewKubeWaiter(client, 2*time.Minute, os.Stdout) waiter := apiclient.NewKubeWaiter(client, 2*time.Minute, os.Stdout)
err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter) err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter, false)
kubeadmutil.CheckErr(err) kubeadmutil.CheckErr(err)
}, },
} }

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
"k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade" "k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
@ -119,14 +120,11 @@ func NewCmdApply(parentFlags *cmdUpgradeFlags) *cobra.Command {
func RunApply(flags *applyFlags) error { func RunApply(flags *applyFlags) error {
// Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap) // Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap)
upgradeVars, err := enforceRequirements(flags.parent.featureGatesString, flags.parent.kubeConfigPath, flags.parent.cfgPath, flags.parent.printConfig, flags.dryRun, flags.parent.ignorePreflightErrorsSet) upgradeVars, err := enforceRequirements(flags.parent, flags.dryRun, flags.newK8sVersionStr)
if err != nil { if err != nil {
return err return err
} }
// Set the upgraded version on the external config object now
upgradeVars.cfg.KubernetesVersion = flags.newK8sVersionStr
// Grab the external, versioned configuration and convert it to the internal type for usage here later // Grab the external, versioned configuration and convert it to the internal type for usage here later
internalcfg := &kubeadmapi.MasterConfiguration{} internalcfg := &kubeadmapi.MasterConfiguration{}
legacyscheme.Scheme.Convert(upgradeVars.cfg, internalcfg, nil) legacyscheme.Scheme.Convert(upgradeVars.cfg, internalcfg, nil)
@ -144,6 +142,10 @@ func RunApply(flags *applyFlags) error {
} }
flags.newK8sVersion = k8sVer flags.newK8sVersion = k8sVer
if err := features.ValidateVersion(features.InitFeatureGates, internalcfg.FeatureGates, internalcfg.KubernetesVersion); err != nil {
return err
}
// Enforce the version skew policies // Enforce the version skew policies
if err := EnforceVersionPolicies(flags, upgradeVars.versionGetter); err != nil { if err := EnforceVersionPolicies(flags, upgradeVars.versionGetter); err != nil {
return fmt.Errorf("[upgrade/version] FATAL: %v", err) return fmt.Errorf("[upgrade/version] FATAL: %v", err)
@ -167,7 +169,7 @@ func RunApply(flags *applyFlags) error {
} }
// Upgrade RBAC rules and addons. // Upgrade RBAC rules and addons.
if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, internalcfg, flags.newK8sVersion); err != nil { if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, internalcfg, flags.newK8sVersion, flags.dryRun); err != nil {
return fmt.Errorf("[upgrade/postupgrade] FATAL post-upgrade error: %v", err) return fmt.Errorf("[upgrade/postupgrade] FATAL post-upgrade error: %v", err)
} }

View File

@ -48,35 +48,37 @@ type upgradeVariables struct {
} }
// enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure
func enforceRequirements(featureGatesString, kubeConfigPath, cfgPath string, printConfig, dryRun bool, ignoreChecksErrors sets.String) (*upgradeVariables, error) { func enforceRequirements(flags *cmdUpgradeFlags, dryRun bool, newK8sVersion string) (*upgradeVariables, error) {
client, err := getClient(kubeConfigPath, dryRun) client, err := getClient(flags.kubeConfigPath, dryRun)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", kubeConfigPath, err) return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", flags.kubeConfigPath, err)
} }
// Run healthchecks against the cluster // Run healthchecks against the cluster
if err := upgrade.CheckClusterHealth(client, ignoreChecksErrors); err != nil { if err := upgrade.CheckClusterHealth(client, flags.ignorePreflightErrorsSet); err != nil {
return nil, fmt.Errorf("[upgrade/health] FATAL: %v", err) return nil, fmt.Errorf("[upgrade/health] FATAL: %v", err)
} }
// Fetch the configuration from a file or ConfigMap and validate it // Fetch the configuration from a file or ConfigMap and validate it
cfg, err := upgrade.FetchConfiguration(client, os.Stdout, cfgPath) cfg, err := upgrade.FetchConfiguration(client, os.Stdout, flags.cfgPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err) return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err)
} }
// If a new k8s version should be set, apply the change before printing the config
if len(newK8sVersion) != 0 {
cfg.KubernetesVersion = newK8sVersion
}
// If the user told us to print this information out; do it! // If the user told us to print this information out; do it!
if printConfig { if flags.printConfig {
printConfiguration(cfg, os.Stdout) printConfiguration(cfg, os.Stdout)
} }
cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, featureGatesString) cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, flags.featureGatesString)
if err != nil { if err != nil {
return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err) return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err)
} }
if err := features.ValidateVersion(features.InitFeatureGates, cfg.FeatureGates, cfg.KubernetesVersion); err != nil {
return nil, err
}
return &upgradeVariables{ return &upgradeVariables{
client: client, client: client,

View File

@ -52,8 +52,7 @@ func TestPrintConfiguration(t *testing.T) {
keyFile: "" keyFile: ""
imageRepository: "" imageRepository: ""
kubeProxy: {} kubeProxy: {}
kubeletConfiguration: kubeletConfiguration: {}
baseConfig: null
kubernetesVersion: v1.7.1 kubernetesVersion: v1.7.1
networking: networking:
dnsDomain: "" dnsDomain: ""
@ -86,8 +85,7 @@ func TestPrintConfiguration(t *testing.T) {
keyFile: "" keyFile: ""
imageRepository: "" imageRepository: ""
kubeProxy: {} kubeProxy: {}
kubeletConfiguration: kubeletConfiguration: {}
baseConfig: null
kubernetesVersion: v1.7.1 kubernetesVersion: v1.7.1
networking: networking:
dnsDomain: "" dnsDomain: ""
@ -130,8 +128,7 @@ func TestPrintConfiguration(t *testing.T) {
operatorVersion: v0.1.0 operatorVersion: v0.1.0
imageRepository: "" imageRepository: ""
kubeProxy: {} kubeProxy: {}
kubeletConfiguration: kubeletConfiguration: {}
baseConfig: null
kubernetesVersion: v1.7.1 kubernetesVersion: v1.7.1
networking: networking:
dnsDomain: "" dnsDomain: ""

View File

@ -54,8 +54,8 @@ func NewCmdPlan(parentFlags *cmdUpgradeFlags) *cobra.Command {
// RunPlan takes care of outputting available versions to upgrade to for the user // RunPlan takes care of outputting available versions to upgrade to for the user
func RunPlan(parentFlags *cmdUpgradeFlags) error { func RunPlan(parentFlags *cmdUpgradeFlags) error {
// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never set dry-run for plan. // Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning.
upgradeVars, err := enforceRequirements(parentFlags.featureGatesString, parentFlags.kubeConfigPath, parentFlags.cfgPath, parentFlags.printConfig, false, parentFlags.ignorePreflightErrorsSet) upgradeVars, err := enforceRequirements(parentFlags, false, "")
if err != nil { if err != nil {
return err return err
} }

View File

@ -33,10 +33,10 @@ const (
// CoreDNS is alpha in v1.9 // CoreDNS is alpha in v1.9
CoreDNS = "CoreDNS" CoreDNS = "CoreDNS"
// SelfHosting is beta in v1.8 // SelfHosting is beta in v1.9
SelfHosting = "SelfHosting" SelfHosting = "SelfHosting"
// StoreCertsInSecrets is alpha in v1.8 // StoreCertsInSecrets is alpha in v1.8 and v1.9
StoreCertsInSecrets = "StoreCertsInSecrets" StoreCertsInSecrets = "StoreCertsInSecrets"
// DynamicKubeletConfig is alpha in v1.9 // DynamicKubeletConfig is alpha in v1.9
@ -49,7 +49,8 @@ var v190 = version.MustParseSemantic("v1.9.0-alpha.1")
var InitFeatureGates = FeatureList{ var InitFeatureGates = FeatureList{
SelfHosting: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}}, SelfHosting: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}},
StoreCertsInSecrets: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}}, StoreCertsInSecrets: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}},
HighAvailability: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, // We don't want to advertise this feature gate exists in v1.9 to avoid confusion as it is not yet working
HighAvailability: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190, HiddenInHelpText: true},
CoreDNS: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, CoreDNS: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
DynamicKubeletConfig: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190}, DynamicKubeletConfig: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
} }
@ -58,6 +59,7 @@ var InitFeatureGates = FeatureList{
type Feature struct { type Feature struct {
utilfeature.FeatureSpec utilfeature.FeatureSpec
MinimumVersion *version.Version MinimumVersion *version.Version
HiddenInHelpText bool
} }
// FeatureList represents a list of feature gates // FeatureList represents a list of feature gates
@ -113,6 +115,10 @@ func Keys(featureList FeatureList) []string {
func KnownFeatures(f *FeatureList) []string { func KnownFeatures(f *FeatureList) []string {
var known []string var known []string
for k, v := range *f { for k, v := range *f {
if v.HiddenInHelpText {
continue
}
pre := "" pre := ""
if v.PreRelease != utilfeature.GA { if v.PreRelease != utilfeature.GA {
pre = fmt.Sprintf("%s - ", v.PreRelease) pre = fmt.Sprintf("%s - ", v.PreRelease)

View File

@ -43,7 +43,7 @@ import (
// CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature. // CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature.
func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error { func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster", fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster\n",
kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem) kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)
_, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs() _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
@ -95,7 +95,7 @@ func ConsumeBaseKubeletConfiguration(nodeName string) error {
// updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap // updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap
func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error { func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error {
fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s", fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s\n",
nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem) nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)
// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned. // Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
@ -203,7 +203,7 @@ func getLocalNodeTLSBootstrappedClient() (clientset.Interface, error) {
// WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master. // WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master.
func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error { func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error {
fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s", cfg.NodeName) fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s\n", cfg.NodeName)
_, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs() _, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
if err != nil { if err != nil {

View File

@ -52,7 +52,7 @@ const (
// 8. In order to avoid race conditions, we have to make sure that the static pod is deleted correctly before we continue // Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes
// Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes // Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes
// 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop // 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter) error { func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter, dryRun bool) error {
// Adjust the timeout slightly to something self-hosting specific // Adjust the timeout slightly to something self-hosting specific
waiter.SetTimeout(selfHostingWaitTimeout) waiter.SetTimeout(selfHostingWaitTimeout)
@ -104,10 +104,12 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
return err return err
} }
// Remove the old Static Pod manifest // Remove the old Static Pod manifest if not dryrunning
if !dryRun {
if err := os.RemoveAll(manifestPath); err != nil { if err := os.RemoveAll(manifestPath); err != nil {
return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err) return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err)
} }
}
// Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to // Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to
// remove the Static Pod (or the mirror Pod respectively). This implicitly also tests that the API server endpoint is healthy, // remove the Static Pod (or the mirror Pod respectively). This implicitly also tests that the API server endpoint is healthy,

View File

@ -36,6 +36,7 @@ go_library(
"//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library",
"//cmd/kubeadm/app/util/dryrun:go_default_library",
"//pkg/api/legacyscheme:go_default_library", "//pkg/api/legacyscheme:go_default_library",
"//pkg/util/version:go_default_library", "//pkg/util/version:go_default_library",
"//pkg/version:go_default_library", "//pkg/version:go_default_library",

View File

@ -28,6 +28,9 @@ const (
// MaximumAllowedMinorVersionUpgradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go // MaximumAllowedMinorVersionUpgradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go
MaximumAllowedMinorVersionUpgradeSkew = 1 MaximumAllowedMinorVersionUpgradeSkew = 1
// MaximumAllowedMinorVersionDowngradeSkew describes how many minor versions kubeadm can downgrade the control plane version in one go
MaximumAllowedMinorVersionDowngradeSkew = 1
// MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster // MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster
MaximumAllowedMinorVersionKubeletSkew = 1 MaximumAllowedMinorVersionKubeletSkew = 1
) )
@ -72,23 +75,41 @@ func EnforceVersionPolicies(versionGetter VersionGetter, newK8sVersionStr string
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the minimum supported version %q. Please specify a higher version to upgrade to", newK8sVersionStr, clusterVersionStr)) skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the minimum supported version %q. Please specify a higher version to upgrade to", newK8sVersionStr, clusterVersionStr))
} }
// Make sure new version is higher than the current Kubernetes version // kubeadm doesn't support upgrades between two minor versions; e.g. a v1.7 -> v1.9 upgrade is not supported right away
if clusterVersion.AtLeast(newK8sVersion) {
// Even though we don't officially support downgrades, it "should work", and if user(s) need it and are willing to try; they can do so with --force
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the cluster version %q. Downgrades are not supported yet", newK8sVersionStr, clusterVersionStr))
} else {
// If this code path runs, it's an upgrade (this code will run most of the time)
// kubeadm doesn't support upgrades between two minor versions; e.g. a v1.7 -> v1.9 upgrade is not supported. Enforce that here
if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew { if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew)) tooLargeUpgradeSkewErr := fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew)
// If the version that we're about to upgrade to is a released version, we should fully enforce this policy
// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeUpgradeSkewErr)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeUpgradeSkewErr)
}
}
// kubeadm doesn't support downgrades between two minor versions; e.g. a v1.9 -> v1.7 downgrade is not supported right away
if newK8sVersion.Minor() < clusterVersion.Minor()-MaximumAllowedMinorVersionDowngradeSkew {
tooLargeDowngradeSkewErr := fmt.Errorf("Specified version to downgrade to %q is too low; kubeadm can downgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionDowngradeSkew)
// If the version that we're about to downgrade to is a released version, we should fully enforce this policy
// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeDowngradeSkewErr)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeDowngradeSkewErr)
} }
} }
// If the kubeadm version is lower than what we want to upgrade to; error // If the kubeadm version is lower than what we want to upgrade to; error
if kubeadmVersion.LessThan(newK8sVersion) { if kubeadmVersion.LessThan(newK8sVersion) {
if newK8sVersion.Minor() > kubeadmVersion.Minor() { if newK8sVersion.Minor() > kubeadmVersion.Minor() {
// This is totally unsupported; kubeadm has no idea how it should handle a newer minor release than itself tooLargeKubeadmSkew := fmt.Errorf("Specified version to upgrade to %q is at least one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor())
skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor())) // This is unsupported; kubeadm has no idea how it should handle a newer minor release than itself
// If the version is a CI/dev/experimental version though, lower the severity of this check, but then require the -f flag
if len(newK8sVersion.PreRelease()) == 0 {
skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeKubeadmSkew)
} else {
skewErrors.Skippable = append(skewErrors.Skippable, tooLargeKubeadmSkew)
}
} else { } else {
// Upgrading to a higher patch version than kubeadm is ok if the user specifies --force. Not recommended, but possible. // Upgrading to a higher patch version than kubeadm is ok if the user specifies --force. Not recommended, but possible.
skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is higher than the kubeadm version %q. Upgrade kubeadm first using the tool you used to install kubeadm", newK8sVersionStr, kubeadmVersionStr)) skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is higher than the kubeadm version %q. Upgrade kubeadm first using the tool you used to install kubeadm", newK8sVersionStr, kubeadmVersionStr))

View File

@ -46,23 +46,21 @@ func TestEnforceVersionPolicies(t *testing.T) {
}, },
newK8sVersion: "v1.9.0", newK8sVersion: "v1.9.0",
}, },
{ // downgrades not supported { // downgrades ok
vg: &fakeVersionGetter{ vg: &fakeVersionGetter{
clusterVersion: "v1.8.3", clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3", kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3", kubeadmVersion: "v1.8.3",
}, },
newK8sVersion: "v1.8.2", newK8sVersion: "v1.8.2",
expectedSkippableErrs: 1,
}, },
{ // upgrades without bumping the version number not supported yet. TODO: Change this? { // upgrades without bumping the version number ok
vg: &fakeVersionGetter{ vg: &fakeVersionGetter{
clusterVersion: "v1.8.3", clusterVersion: "v1.8.3",
kubeletVersion: "v1.8.3", kubeletVersion: "v1.8.3",
kubeadmVersion: "v1.8.3", kubeadmVersion: "v1.8.3",
}, },
newK8sVersion: "v1.8.3", newK8sVersion: "v1.8.3",
expectedSkippableErrs: 1,
}, },
{ // new version must be higher than v1.8.0 { // new version must be higher than v1.8.0
vg: &fakeVersionGetter{ vg: &fakeVersionGetter{
@ -72,7 +70,6 @@ func TestEnforceVersionPolicies(t *testing.T) {
}, },
newK8sVersion: "v1.7.10", newK8sVersion: "v1.7.10",
expectedMandatoryErrs: 1, // version must be higher than v1.8.0 expectedMandatoryErrs: 1, // version must be higher than v1.8.0
expectedSkippableErrs: 1, // version shouldn't be downgraded
}, },
{ // upgrading two minor versions in one go is not supported { // upgrading two minor versions in one go is not supported
vg: &fakeVersionGetter{ vg: &fakeVersionGetter{
@ -84,6 +81,15 @@ func TestEnforceVersionPolicies(t *testing.T) {
expectedMandatoryErrs: 1, // can't upgrade two minor versions expectedMandatoryErrs: 1, // can't upgrade two minor versions
expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
}, },
{ // downgrading two minor versions in one go is not supported
vg: &fakeVersionGetter{
clusterVersion: "v1.10.3",
kubeletVersion: "v1.10.3",
kubeadmVersion: "v1.10.0",
},
newK8sVersion: "v1.8.3",
expectedMandatoryErrs: 1, // can't downgrade two minor versions
},
{ // kubeadm version must be higher than the new kube version. However, patch version skews may be forced { // kubeadm version must be higher than the new kube version. However, patch version skews may be forced
vg: &fakeVersionGetter{ vg: &fakeVersionGetter{
clusterVersion: "v1.8.3", clusterVersion: "v1.8.3",

View File

@ -18,6 +18,8 @@ package upgrade
import ( import (
"fmt" "fmt"
"os"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/errors"
@ -31,14 +33,16 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo" "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node" nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig" "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
"k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/pkg/util/version"
) )
// PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do // PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
// Note that the markmaster phase is left out, not needed, and no token is created as that doesn't belong to the upgrade // Note that the markmaster phase is left out, not needed, and no token is created as that doesn't belong to the upgrade
func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version) error { func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
errs := []error{} errs := []error{}
// Upload currently used configuration to the cluster // Upload currently used configuration to the cluster
@ -63,6 +67,11 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
errs = append(errs, err) errs = append(errs, err)
} }
// Upgrade to a self-hosted control plane if possible
if err := upgradeToSelfHosting(client, cfg, newK8sVer, dryRun); err != nil {
errs = append(errs, err)
}
// TODO: Is this needed to do here? I think that updating cluster info should probably be separate from a normal upgrade // TODO: Is this needed to do here? I think that updating cluster info should probably be separate from a normal upgrade
// Create the cluster-info ConfigMap with the associated RBAC rules // Create the cluster-info ConfigMap with the associated RBAC rules
// if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil { // if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
@ -92,10 +101,12 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
if err := dns.EnsureDNSAddon(cfg, client); err != nil { if err := dns.EnsureDNSAddon(cfg, client); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
// Remove the old kube-dns deployment if coredns is now used
if err := coreDNSDeployment(cfg, client); err != nil { if !dryRun {
if err := removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg, client); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
}
if err := proxy.EnsureProxyAddon(cfg, client); err != nil { if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
errs = append(errs, err) errs = append(errs, err)
@ -103,22 +114,41 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
return errors.NewAggregate(errs) return errors.NewAggregate(errs)
} }
func coreDNSDeployment(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error { func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
if features.Enabled(cfg.FeatureGates, features.CoreDNS) { if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
return apiclient.TryRunCommand(func() error { return apiclient.TryRunCommand(func() error {
getCoreDNS, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{}) coreDNSDeployment, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
if err != nil { if err != nil {
return err return err
} }
if getCoreDNS.Status.ReadyReplicas == 0 { if coreDNSDeployment.Status.ReadyReplicas == 0 {
return fmt.Errorf("the CodeDNS deployment isn't ready yet") return fmt.Errorf("the CodeDNS deployment isn't ready yet")
} }
err = client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Delete(kubeadmconstants.KubeDNS, nil) return apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
if err != nil { }, 10)
return err
}
return nil
}, 5)
} }
return nil return nil
} }
// upgradeToSelfHosting converts the static Pod-hosted control plane into a
// self-hosted one, but only when the SelfHosting feature gate is on, the
// control plane is not already self-hosted, and the target version is at
// least v1.9.0-alpha.3. In every other case it returns nil without doing
// anything.
func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
	// Guard clauses: bail out early unless all preconditions hold.
	if !features.Enabled(cfg.FeatureGates, features.SelfHosting) {
		return nil
	}
	if IsControlPlaneSelfHosted(client) || !newK8sVer.AtLeast(v190alpha3) {
		return nil
	}

	waiter := getWaiter(dryRun, client)

	// kubeadm will now convert the static Pod-hosted control plane into a self-hosted one
	fmt.Println("[self-hosted] Creating self-hosted control plane.")
	if err := selfhosting.CreateSelfHostedControlPlane(kubeadmconstants.GetStaticPodDirectory(), kubeadmconstants.KubernetesDir, cfg, client, waiter, dryRun); err != nil {
		return fmt.Errorf("error creating self hosted control plane: %v", err)
	}
	return nil
}
// getWaiter returns the apiclient.Waiter implementation appropriate for the
// current mode: a dry-run waiter that only pretends to wait, or a real
// KubeWaiter backed by the API client with a 30-minute timeout, logging to
// stdout.
// TODO: Consolidate this with what's in init.go?
func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
	if !dryRun {
		return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
	}
	return dryrunutil.NewWaiter()
}

View File

@ -30,7 +30,9 @@ import (
"k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/pkg/util/version"
) )
// TODO: Maybe move these constants elsewhere in future releases
var v190 = version.MustParseSemantic("v1.9.0") var v190 = version.MustParseSemantic("v1.9.0")
var v190alpha3 = version.MustParseSemantic("v1.9.0-alpha.3")
var expiry = 180 * 24 * time.Hour var expiry = 180 * 24 * time.Hour
// backupAPIServerCertAndKey backups the old cert and key of kube-apiserver to a specified directory. // backupAPIServerCertAndKey backups the old cert and key of kube-apiserver to a specified directory.

View File

@ -107,6 +107,15 @@ func DeleteDaemonSetForeground(client clientset.Interface, namespace, name strin
return client.AppsV1beta2().DaemonSets(namespace).Delete(name, deleteOptions) return client.AppsV1beta2().DaemonSets(namespace).Delete(name, deleteOptions)
} }
// DeleteDeploymentForeground deletes the specified Deployment in foreground
// mode; i.e. it blocks until/makes sure all the managed Pods are deleted.
func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
	policy := metav1.DeletePropagationForeground
	return client.AppsV1beta2().Deployments(namespace).Delete(name, &metav1.DeleteOptions{
		PropagationPolicy: &policy,
	})
}
// CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil { if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil {