mirror of https://github.com/k3s-io/k3s
kubeadm: Fix a couple of upgrade/downgrade-related bugs
parent d0805881f5
commit f7c494fe5b
@@ -156,12 +156,9 @@ func SetDefaults_NodeConfiguration(obj *NodeConfiguration) {
 	}
 }
 
-// SetDefaultsEtcdSelfHosted sets defaults for self-hosted etcd
+// SetDefaultsEtcdSelfHosted sets defaults for self-hosted etcd if used
 func SetDefaultsEtcdSelfHosted(obj *MasterConfiguration) {
-	if obj.Etcd.SelfHosted == nil {
-		obj.Etcd.SelfHosted = &SelfHostedEtcd{}
-	}
 	if obj.Etcd.SelfHosted != nil {
 		if obj.Etcd.SelfHosted.ClusterServiceName == "" {
 			obj.Etcd.SelfHosted.ClusterServiceName = DefaultEtcdClusterServiceName
 		}
@@ -177,6 +174,7 @@ func SetDefaultsEtcdSelfHosted(obj *MasterConfiguration) {
 		if obj.Etcd.SelfHosted.CertificatesDir == "" {
 			obj.Etcd.SelfHosted.CertificatesDir = DefaultEtcdCertDir
 		}
+	}
 }
 
 // SetDefaults_KubeletConfiguration assigns default values to kubelet
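
The defaulting bug above is easy to reproduce outside kubeadm: if a defaulting pass unconditionally instantiates an optional pointer field, the feature it represents looks "configured" everywhere downstream. A minimal sketch with hypothetical types (not kubeadm's real ones) of the "only default if used" pattern this commit switches to:

package main

import "fmt"

type SelfHostedEtcd struct{ ClusterServiceName string }

type MasterConfiguration struct {
	SelfHosted *SelfHostedEtcd
}

// setDefaults fills in defaults only when the user actually opted in,
// mirroring the "if used" behavior the commit introduces.
func setDefaults(cfg *MasterConfiguration) {
	if cfg.SelfHosted != nil {
		if cfg.SelfHosted.ClusterServiceName == "" {
			cfg.SelfHosted.ClusterServiceName = "etcd-cluster"
		}
	}
}

func main() {
	cfg := &MasterConfiguration{} // self-hosted etcd not requested
	setDefaults(cfg)
	fmt.Println(cfg.SelfHosted == nil) // true: the feature stays "off"
}
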
@@ -147,7 +147,7 @@ type NodeConfiguration struct {
 
 // KubeletConfiguration contains elements describing initial remote configuration of kubelet
 type KubeletConfiguration struct {
-	BaseConfig *kubeletconfigv1alpha1.KubeletConfiguration `json:"baseConfig"`
+	BaseConfig *kubeletconfigv1alpha1.KubeletConfiguration `json:"baseConfig,omitempty"`
 }
 
 // HostPathMount contains elements describing volumes that are mounted from the
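
The `omitempty` tag is what changes the printed configuration in this commit's test expectations further down: a nil BaseConfig pointer is dropped from the marshaled output instead of being emitted as `baseConfig: null`. A standalone illustration using encoding/json (kubeadm's YAML printer follows the same json tags):

package main

import (
	"encoding/json"
	"fmt"
)

type BaseConfig struct{ Address string }

type WithoutOmitempty struct {
	BaseConfig *BaseConfig `json:"baseConfig"`
}

type WithOmitempty struct {
	BaseConfig *BaseConfig `json:"baseConfig,omitempty"`
}

func main() {
	a, _ := json.Marshal(WithoutOmitempty{})
	b, _ := json.Marshal(WithOmitempty{})
	fmt.Println(string(a)) // {"baseConfig":null}
	fmt.Println(string(b)) // {}
}
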
@@ -433,7 +433,7 @@ func (i *Init) Run(out io.Writer) error {
 		// Temporary control plane is up, now we create our self hosted control
 		// plane components and remove the static manifests:
 		fmt.Println("[self-hosted] Creating self-hosted control plane.")
-		if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter); err != nil {
+		if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter, i.dryRun); err != nil {
 			return fmt.Errorf("error creating self hosted control plane: %v", err)
 		}
 	}
@@ -103,7 +103,7 @@ func getSelfhostingSubCommand() *cobra.Command {
 
 			// Converts the Static Pod-hosted control plane into a self-hosted one
 			waiter := apiclient.NewKubeWaiter(client, 2*time.Minute, os.Stdout)
-			err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter)
+			err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter, false)
 			kubeadmutil.CheckErr(err)
 		},
 	}
@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
 	cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
 	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
+	"k8s.io/kubernetes/cmd/kubeadm/app/features"
 	"k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
 	"k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade"
 	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
@@ -119,14 +120,11 @@ func NewCmdApply(parentFlags *cmdUpgradeFlags) *cobra.Command {
 func RunApply(flags *applyFlags) error {
 
 	// Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap)
-	upgradeVars, err := enforceRequirements(flags.parent.featureGatesString, flags.parent.kubeConfigPath, flags.parent.cfgPath, flags.parent.printConfig, flags.dryRun, flags.parent.ignorePreflightErrorsSet)
+	upgradeVars, err := enforceRequirements(flags.parent, flags.dryRun, flags.newK8sVersionStr)
 	if err != nil {
 		return err
 	}
 
-	// Set the upgraded version on the external config object now
-	upgradeVars.cfg.KubernetesVersion = flags.newK8sVersionStr
-
 	// Grab the external, versioned configuration and convert it to the internal type for usage here later
 	internalcfg := &kubeadmapi.MasterConfiguration{}
 	legacyscheme.Scheme.Convert(upgradeVars.cfg, internalcfg, nil)
@@ -144,6 +142,10 @@ func RunApply(flags *applyFlags) error {
 	}
 	flags.newK8sVersion = k8sVer
 
+	if err := features.ValidateVersion(features.InitFeatureGates, internalcfg.FeatureGates, internalcfg.KubernetesVersion); err != nil {
+		return err
+	}
+
 	// Enforce the version skew policies
 	if err := EnforceVersionPolicies(flags, upgradeVars.versionGetter); err != nil {
 		return fmt.Errorf("[upgrade/version] FATAL: %v", err)
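
RunApply now validates the configured feature gates against the version being upgraded to, not just the running one. The rule amounts to: every set gate must be known, and must not require a newer minimum version than the target. A simplified sketch of that rule (hypothetical names and integer minors; the real features.ValidateVersion works on kubeadm's FeatureList and semantic versions):

package main

import "fmt"

type feature struct{ MinimumMinor int }

var knownFeatures = map[string]feature{
	"SelfHosting":          {MinimumMinor: 8},
	"HighAvailability":     {MinimumMinor: 9},
	"DynamicKubeletConfig": {MinimumMinor: 9},
}

// validateVersion rejects gates that the requested Kubernetes minor version cannot honor.
func validateVersion(gates map[string]bool, targetMinor int) error {
	for name := range gates {
		f, ok := knownFeatures[name]
		if !ok {
			return fmt.Errorf("unknown feature gate %q", name)
		}
		if targetMinor < f.MinimumMinor {
			return fmt.Errorf("feature gate %q requires v1.%d or newer, got v1.%d", name, f.MinimumMinor, targetMinor)
		}
	}
	return nil
}

func main() {
	gates := map[string]bool{"HighAvailability": true}
	fmt.Println(validateVersion(gates, 8)) // error: requires v1.9
	fmt.Println(validateVersion(gates, 9)) // <nil>
}
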
@@ -167,7 +169,7 @@ func RunApply(flags *applyFlags) error {
 	}
 
 	// Upgrade RBAC rules and addons.
-	if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, internalcfg, flags.newK8sVersion); err != nil {
+	if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, internalcfg, flags.newK8sVersion, flags.dryRun); err != nil {
 		return fmt.Errorf("[upgrade/postupgrade] FATAL post-upgrade error: %v", err)
 	}
 
@@ -48,35 +48,37 @@ type upgradeVariables struct {
 }
 
 // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure
-func enforceRequirements(featureGatesString, kubeConfigPath, cfgPath string, printConfig, dryRun bool, ignoreChecksErrors sets.String) (*upgradeVariables, error) {
-	client, err := getClient(kubeConfigPath, dryRun)
+func enforceRequirements(flags *cmdUpgradeFlags, dryRun bool, newK8sVersion string) (*upgradeVariables, error) {
+	client, err := getClient(flags.kubeConfigPath, dryRun)
 	if err != nil {
-		return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", kubeConfigPath, err)
+		return nil, fmt.Errorf("couldn't create a Kubernetes client from file %q: %v", flags.kubeConfigPath, err)
 	}
 
 	// Run healthchecks against the cluster
-	if err := upgrade.CheckClusterHealth(client, ignoreChecksErrors); err != nil {
+	if err := upgrade.CheckClusterHealth(client, flags.ignorePreflightErrorsSet); err != nil {
 		return nil, fmt.Errorf("[upgrade/health] FATAL: %v", err)
 	}
 
 	// Fetch the configuration from a file or ConfigMap and validate it
-	cfg, err := upgrade.FetchConfiguration(client, os.Stdout, cfgPath)
+	cfg, err := upgrade.FetchConfiguration(client, os.Stdout, flags.cfgPath)
 	if err != nil {
 		return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err)
 	}
 
+	// If a new k8s version should be set, apply the change before printing the config
+	if len(newK8sVersion) != 0 {
+		cfg.KubernetesVersion = newK8sVersion
+	}
+
 	// If the user told us to print this information out; do it!
-	if printConfig {
+	if flags.printConfig {
 		printConfiguration(cfg, os.Stdout)
 	}
 
-	cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, featureGatesString)
+	cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, flags.featureGatesString)
 	if err != nil {
 		return nil, fmt.Errorf("[upgrade/config] FATAL: %v", err)
 	}
 	if err := features.ValidateVersion(features.InitFeatureGates, cfg.FeatureGates, cfg.KubernetesVersion); err != nil {
 		return nil, err
 	}
 
 	return &upgradeVariables{
 		client: client,
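
enforceRequirements still parses the --feature-gates string itself, now read from the shared flags struct, and it does so before the version-aware validation above. The accepted format is comma-separated key=value pairs; a self-contained sketch of just the parsing step (not the real features.NewFeatureGate, which additionally validates names against InitFeatureGates):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatureGates turns a string like "SelfHosting=true,CoreDNS=false" into a map.
func parseFeatureGates(s string) (map[string]bool, error) {
	gates := map[string]bool{}
	if s == "" {
		return gates, nil
	}
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("missing '=' in %q", pair)
		}
		enabled, err := strconv.ParseBool(kv[1])
		if err != nil {
			return nil, fmt.Errorf("invalid value for %q: %v", kv[0], err)
		}
		gates[strings.TrimSpace(kv[0])] = enabled
	}
	return gates, nil
}

func main() {
	gates, err := parseFeatureGates("SelfHosting=true,CoreDNS=false")
	fmt.Println(gates, err) // map[CoreDNS:false SelfHosting:true] <nil>
}
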
@@ -52,8 +52,7 @@ func TestPrintConfiguration(t *testing.T) {
   keyFile: ""
 imageRepository: ""
 kubeProxy: {}
-kubeletConfiguration:
-  baseConfig: null
+kubeletConfiguration: {}
 kubernetesVersion: v1.7.1
 networking:
   dnsDomain: ""
@@ -86,8 +85,7 @@ func TestPrintConfiguration(t *testing.T) {
   keyFile: ""
 imageRepository: ""
 kubeProxy: {}
-kubeletConfiguration:
-  baseConfig: null
+kubeletConfiguration: {}
 kubernetesVersion: v1.7.1
 networking:
   dnsDomain: ""
@@ -130,8 +128,7 @@ func TestPrintConfiguration(t *testing.T) {
     operatorVersion: v0.1.0
 imageRepository: ""
 kubeProxy: {}
-kubeletConfiguration:
-  baseConfig: null
+kubeletConfiguration: {}
 kubernetesVersion: v1.7.1
 networking:
   dnsDomain: ""
@@ -54,8 +54,8 @@ func NewCmdPlan(parentFlags *cmdUpgradeFlags) *cobra.Command {
 
 // RunPlan takes care of outputting available versions to upgrade to for the user
 func RunPlan(parentFlags *cmdUpgradeFlags) error {
-	// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never set dry-run for plan.
-	upgradeVars, err := enforceRequirements(parentFlags.featureGatesString, parentFlags.kubeConfigPath, parentFlags.cfgPath, parentFlags.printConfig, false, parentFlags.ignorePreflightErrorsSet)
+	// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning.
+	upgradeVars, err := enforceRequirements(parentFlags, false, "")
 	if err != nil {
 		return err
 	}
@@ -33,10 +33,10 @@ const (
 	// CoreDNS is alpha in v1.9
 	CoreDNS = "CoreDNS"
 
-	// SelfHosting is beta in v1.8
+	// SelfHosting is beta in v1.9
 	SelfHosting = "SelfHosting"
 
-	// StoreCertsInSecrets is alpha in v1.8
+	// StoreCertsInSecrets is alpha in v1.8 and v1.9
 	StoreCertsInSecrets = "StoreCertsInSecrets"
 
 	// DynamicKubeletConfig is alpha in v1.9
@@ -49,7 +49,8 @@ var v190 = version.MustParseSemantic("v1.9.0-alpha.1")
 var InitFeatureGates = FeatureList{
 	SelfHosting:          {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Beta}},
 	StoreCertsInSecrets:  {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}},
-	HighAvailability:     {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
+	// We don't want to advertise this feature gate exists in v1.9 to avoid confusion as it is not yet working
+	HighAvailability:     {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190, HiddenInHelpText: true},
 	CoreDNS:              {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
 	DynamicKubeletConfig: {FeatureSpec: utilfeature.FeatureSpec{Default: false, PreRelease: utilfeature.Alpha}, MinimumVersion: v190},
 }
@@ -58,6 +59,7 @@ var InitFeatureGates = FeatureList{
 type Feature struct {
 	utilfeature.FeatureSpec
 	MinimumVersion *version.Version
+	HiddenInHelpText bool
 }
 
 // FeatureList represents a list of feature gates
@@ -113,6 +115,10 @@ func Keys(featureList FeatureList) []string {
 func KnownFeatures(f *FeatureList) []string {
 	var known []string
 	for k, v := range *f {
+		if v.HiddenInHelpText {
+			continue
+		}
+
 		pre := ""
 		if v.PreRelease != utilfeature.GA {
 			pre = fmt.Sprintf("%s - ", v.PreRelease)
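
The net effect of HiddenInHelpText shows up only in KnownFeatures above: a hidden gate stays fully functional and settable, it just never appears in the generated --feature-gates help string. A condensed sketch of that filtering, with simplified types standing in for kubeadm's FeatureList:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type feature struct {
	PreRelease string
	Default    bool
	Hidden     bool
}

// knownFeatures builds the help-text list, skipping hidden gates.
func knownFeatures(features map[string]feature) string {
	var known []string
	for name, f := range features {
		if f.Hidden {
			continue // advertised nowhere, but still settable
		}
		known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", name, f.PreRelease, f.Default))
	}
	sort.Strings(known)
	return strings.Join(known, ", ")
}

func main() {
	fmt.Println(knownFeatures(map[string]feature{
		"SelfHosting":      {PreRelease: "BETA"},
		"HighAvailability": {PreRelease: "ALPHA", Hidden: true},
	}))
	// Output: SelfHosting=true|false (BETA - default=false)
}
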
@@ -43,7 +43,7 @@ import (
 
 // CreateBaseKubeletConfiguration creates base kubelet configuration for dynamic kubelet configuration feature.
 func CreateBaseKubeletConfiguration(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
-	fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster",
+	fmt.Printf("[kubelet] Uploading a ConfigMap %q in namespace %s with base configuration for the kubelets in the cluster\n",
 		kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)
 
 	_, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
@@ -95,7 +95,7 @@ func ConsumeBaseKubeletConfiguration(nodeName string) error {
 
 // updateNodeWithConfigMap updates node ConfigSource with KubeletBaseConfigurationConfigMap
 func updateNodeWithConfigMap(client clientset.Interface, nodeName string) error {
-	fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s",
+	fmt.Printf("[kubelet] Using Dynamic Kubelet Config for node %q; config sourced from ConfigMap %q in namespace %s\n",
 		nodeName, kubeadmconstants.KubeletBaseConfigurationConfigMap, metav1.NamespaceSystem)
 
 	// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
@@ -203,7 +203,7 @@ func getLocalNodeTLSBootstrappedClient() (clientset.Interface, error) {
 
 // WriteInitKubeletConfigToDiskOnMaster writes base kubelet configuration to disk on master.
 func WriteInitKubeletConfigToDiskOnMaster(cfg *kubeadmapi.MasterConfiguration) error {
-	fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s", cfg.NodeName)
+	fmt.Printf("[kubelet] Writing base configuration of kubelets to disk on master node %s\n", cfg.NodeName)
 
 	_, kubeletCodecs, err := kubeletconfigscheme.NewSchemeAndCodecs()
 	if err != nil {
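
The three Printf fixes above are the same one-character bug: without a trailing \n, consecutive status lines run together on the terminal. Trivial to demonstrate:

package main

import "fmt"

func main() {
	fmt.Printf("[kubelet] step one") // no newline
	fmt.Printf("[kubelet] step two") // glued onto the same line
	fmt.Println()
	fmt.Printf("[kubelet] step one\n") // each status on its own line
	fmt.Printf("[kubelet] step two\n")
}
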
@@ -52,7 +52,7 @@ const (
 // 8. In order to avoid race conditions, we have to make sure that static pod is deleted correctly before we continue
 //    Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes
 // 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
-func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter) error {
+func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter, dryRun bool) error {
 
 	// Adjust the timeout slightly to something self-hosting specific
 	waiter.SetTimeout(selfHostingWaitTimeout)
@@ -104,10 +104,12 @@ func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubea
 			return err
 		}
 
-		// Remove the old Static Pod manifest
-		if err := os.RemoveAll(manifestPath); err != nil {
-			return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err)
+		// Remove the old Static Pod manifest if not dryrunning
+		if !dryRun {
+			if err := os.RemoveAll(manifestPath); err != nil {
+				return fmt.Errorf("unable to delete static pod manifest for %s [%v]", componentName, err)
+			}
 		}
 
 		// Wait for the mirror Pod hash to be removed; otherwise we'll run into race conditions here when the kubelet hasn't had time to
 		// remove the Static Pod (or the mirror Pod respectively). This implicitely also tests that the API server endpoint is healthy,
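
Plumbing dryRun down to this level matters because CreateSelfHostedControlPlane is otherwise destructive: it deletes the static-pod manifests the kubelet is watching. The guard pattern is simply to skip side effects when dry-running; a minimal standalone sketch (the real code skips silently, the logging here is an illustrative addition):

package main

import (
	"fmt"
	"os"
)

// removeManifest deletes a static-pod manifest, unless this is a dry run,
// in which case it only reports what would happen.
func removeManifest(path string, dryRun bool) error {
	if dryRun {
		fmt.Printf("[dryrun] would remove static pod manifest %s\n", path)
		return nil
	}
	if err := os.RemoveAll(path); err != nil {
		return fmt.Errorf("unable to delete static pod manifest %s [%v]", path, err)
	}
	return nil
}

func main() {
	_ = removeManifest("/etc/kubernetes/manifests/kube-apiserver.yaml", true)
}
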
@@ -36,6 +36,7 @@ go_library(
         "//cmd/kubeadm/app/util:go_default_library",
         "//cmd/kubeadm/app/util/apiclient:go_default_library",
         "//cmd/kubeadm/app/util/config:go_default_library",
+        "//cmd/kubeadm/app/util/dryrun:go_default_library",
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/util/version:go_default_library",
         "//pkg/version:go_default_library",
@@ -28,6 +28,9 @@ const (
 	// MaximumAllowedMinorVersionUpgradeSkew describes how many minor versions kubeadm can upgrade the control plane version in one go
 	MaximumAllowedMinorVersionUpgradeSkew = 1
 
+	// MaximumAllowedMinorVersionDowngradeSkew describes how many minor versions kubeadm can downgrade the control plane version in one go
+	MaximumAllowedMinorVersionDowngradeSkew = 1
+
 	// MaximumAllowedMinorVersionKubeletSkew describes how many minor versions the control plane version and the kubelet can skew in a kubeadm cluster
 	MaximumAllowedMinorVersionKubeletSkew = 1
 )
@@ -72,23 +75,41 @@ func EnforceVersionPolicies(versionGetter VersionGetter, newK8sVersionStr string
 		skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the minimum supported version %q. Please specify a higher version to upgrade to", newK8sVersionStr, clusterVersionStr))
 	}
 
-	// Make sure new version is higher than the current Kubernetes version
-	if clusterVersion.AtLeast(newK8sVersion) {
-		// Even though we don't officially support downgrades, it "should work", and if user(s) need it and are willing to try; they can do so with --force
-		skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is equal to or lower than the cluster version %q. Downgrades are not supported yet", newK8sVersionStr, clusterVersionStr))
-	} else {
-		// If this code path runs, it's an upgrade (this code will run most of the time)
-		// kubeadm doesn't support upgrades between two minor versions; e.g. a v1.7 -> v1.9 upgrade is not supported. Enforce that here
-		if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
-			skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew))
-		}
+	// kubeadm doesn't support upgrades between two minor versions; e.g. a v1.7 -> v1.9 upgrade is not supported right away
+	if newK8sVersion.Minor() > clusterVersion.Minor()+MaximumAllowedMinorVersionUpgradeSkew {
+		tooLargeUpgradeSkewErr := fmt.Errorf("Specified version to upgrade to %q is too high; kubeadm can upgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionUpgradeSkew)
+		// If the version that we're about to upgrade to is a released version, we should fully enforce this policy
+		// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
+		if len(newK8sVersion.PreRelease()) == 0 {
+			skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeUpgradeSkewErr)
+		} else {
+			skewErrors.Skippable = append(skewErrors.Skippable, tooLargeUpgradeSkewErr)
+		}
+	}
+
+	// kubeadm doesn't support downgrades between two minor versions; e.g. a v1.9 -> v1.7 downgrade is not supported right away
+	if newK8sVersion.Minor() < clusterVersion.Minor()-MaximumAllowedMinorVersionDowngradeSkew {
+		tooLargeDowngradeSkewErr := fmt.Errorf("Specified version to downgrade to %q is too low; kubeadm can downgrade only %d minor version at a time", newK8sVersionStr, MaximumAllowedMinorVersionDowngradeSkew)
+		// If the version that we're about to downgrade to is a released version, we should fully enforce this policy
+		// If the version is a CI/dev/experimental version, it's okay to jump two minor version steps, but then require the -f flag
+		if len(newK8sVersion.PreRelease()) == 0 {
+			skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeDowngradeSkewErr)
+		} else {
+			skewErrors.Skippable = append(skewErrors.Skippable, tooLargeDowngradeSkewErr)
+		}
+	}
 
 	// If the kubeadm version is lower than what we want to upgrade to; error
 	if kubeadmVersion.LessThan(newK8sVersion) {
 		if newK8sVersion.Minor() > kubeadmVersion.Minor() {
-			// This is totally unsupported; kubeadm has no idea how it should handle a newer minor release than itself
-			skewErrors.Mandatory = append(skewErrors.Mandatory, fmt.Errorf("Specified version to upgrade to %q is one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor()))
+			tooLargeKubeadmSkew := fmt.Errorf("Specified version to upgrade to %q is at least one minor release higher than the kubeadm minor release (%d > %d). Such an upgrade is not supported", newK8sVersionStr, newK8sVersion.Minor(), kubeadmVersion.Minor())
+			// This is unsupported; kubeadm has no idea how it should handle a newer minor release than itself
+			// If the version is a CI/dev/experimental version though, lower the severity of this check, but then require the -f flag
+			if len(newK8sVersion.PreRelease()) == 0 {
+				skewErrors.Mandatory = append(skewErrors.Mandatory, tooLargeKubeadmSkew)
+			} else {
+				skewErrors.Skippable = append(skewErrors.Skippable, tooLargeKubeadmSkew)
+			}
 		} else {
 			// Upgrading to a higher patch version than kubeadm is ok if the user specifies --force. Not recommended, but possible.
 			skewErrors.Skippable = append(skewErrors.Skippable, fmt.Errorf("Specified version to upgrade to %q is higher than the kubeadm version %q. Upgrade kubeadm first using the tool you used to install kubeadm", newK8sVersionStr, kubeadmVersionStr))
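
Concretely, with both skew constants set to 1: from a v1.8.x cluster, v1.9.z is an acceptable upgrade, v1.10.z trips the upgrade-skew check, and from a v1.10.x cluster, v1.8.z trips the new downgrade-skew check; pre-release targets only demote the failure to a skippable (force-able) error. Stripped of the error plumbing, the comparisons reduce to minor-version arithmetic:

package main

import "fmt"

const maxUpgradeSkew, maxDowngradeSkew = 1, 1

// skewProblem reports which skew policy (if any) a cluster-to-target jump violates.
func skewProblem(clusterMinor, targetMinor int) string {
	switch {
	case targetMinor > clusterMinor+maxUpgradeSkew:
		return "upgrade skew too large"
	case targetMinor < clusterMinor-maxDowngradeSkew:
		return "downgrade skew too large"
	default:
		return "ok"
	}
}

func main() {
	fmt.Println(skewProblem(8, 9))  // ok
	fmt.Println(skewProblem(8, 10)) // upgrade skew too large
	fmt.Println(skewProblem(10, 8)) // downgrade skew too large
	fmt.Println(skewProblem(10, 9)) // ok
}
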
@@ -46,23 +46,21 @@ func TestEnforceVersionPolicies(t *testing.T) {
 			},
 			newK8sVersion: "v1.9.0",
 		},
-		{ // downgrades not supported
+		{ // downgrades ok
 			vg: &fakeVersionGetter{
 				clusterVersion: "v1.8.3",
 				kubeletVersion: "v1.8.3",
 				kubeadmVersion: "v1.8.3",
 			},
 			newK8sVersion: "v1.8.2",
-			expectedSkippableErrs: 1,
 		},
-		{ // upgrades without bumping the version number not supported yet. TODO: Change this?
+		{ // upgrades without bumping the version number ok
 			vg: &fakeVersionGetter{
 				clusterVersion: "v1.8.3",
 				kubeletVersion: "v1.8.3",
 				kubeadmVersion: "v1.8.3",
 			},
 			newK8sVersion: "v1.8.3",
-			expectedSkippableErrs: 1,
 		},
 		{ // new version must be higher than v1.8.0
 			vg: &fakeVersionGetter{
@@ -72,7 +70,6 @@ func TestEnforceVersionPolicies(t *testing.T) {
 			},
 			newK8sVersion: "v1.7.10",
 			expectedMandatoryErrs: 1, // version must be higher than v1.8.0
-			expectedSkippableErrs: 1, // version shouldn't be downgraded
 		},
 		{ // upgrading two minor versions in one go is not supported
 			vg: &fakeVersionGetter{
@@ -84,6 +81,15 @@ func TestEnforceVersionPolicies(t *testing.T) {
 			expectedMandatoryErrs: 1, // can't upgrade two minor versions
 			expectedSkippableErrs: 1, // kubelet <-> apiserver skew too large
 		},
+		{ // downgrading two minor versions in one go is not supported
+			vg: &fakeVersionGetter{
+				clusterVersion: "v1.10.3",
+				kubeletVersion: "v1.10.3",
+				kubeadmVersion: "v1.10.0",
+			},
+			newK8sVersion: "v1.8.3",
+			expectedMandatoryErrs: 1, // can't downgrade two minor versions
+		},
 		{ // kubeadm version must be higher than the new kube version. However, patch version skews may be forced
 			vg: &fakeVersionGetter{
 				clusterVersion: "v1.8.3",
@@ -18,6 +18,8 @@ package upgrade
 
 import (
 	"fmt"
+	"os"
+	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/errors"
@@ -31,14 +33,16 @@
 	"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
 	nodebootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
 	certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
+	"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
 	"k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig"
 	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
+	dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
 	"k8s.io/kubernetes/pkg/util/version"
 )
 
 // PerformPostUpgradeTasks runs nearly the same functions as 'kubeadm init' would do
 // Note that the markmaster phase is left out, not needed, and no token is created as that doesn't belong to the upgrade
-func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version) error {
+func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
 	errs := []error{}
 
 	// Upload currently used configuration to the cluster
@@ -63,6 +67,11 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 		errs = append(errs, err)
 	}
 
+	// Upgrade to a self-hosted control plane if possible
+	if err := upgradeToSelfHosting(client, cfg, newK8sVer, dryRun); err != nil {
+		errs = append(errs, err)
+	}
+
 	// TODO: Is this needed to do here? I think that updating cluster info should probably be separate from a normal upgrade
 	// Create the cluster-info ConfigMap with the associated RBAC rules
 	// if err := clusterinfo.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
@@ -92,10 +101,12 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 	if err := dns.EnsureDNSAddon(cfg, client); err != nil {
 		errs = append(errs, err)
 	}
 
-	if err := coreDNSDeployment(cfg, client); err != nil {
-		errs = append(errs, err)
-	}
+	// Remove the old kube-dns deployment if coredns is now used
+	if !dryRun {
+		if err := removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg, client); err != nil {
+			errs = append(errs, err)
+		}
+	}
 
 	if err := proxy.EnsureProxyAddon(cfg, client); err != nil {
 		errs = append(errs, err)
@@ -103,22 +114,41 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.MasterC
 	return errors.NewAggregate(errs)
 }
 
-func coreDNSDeployment(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
+func removeOldKubeDNSDeploymentIfCoreDNSIsUsed(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
 	if features.Enabled(cfg.FeatureGates, features.CoreDNS) {
 		return apiclient.TryRunCommand(func() error {
-			getCoreDNS, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
+			coreDNSDeployment, err := client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Get(kubeadmconstants.CoreDNS, metav1.GetOptions{})
 			if err != nil {
 				return err
 			}
-			if getCoreDNS.Status.ReadyReplicas == 0 {
+			if coreDNSDeployment.Status.ReadyReplicas == 0 {
 				return fmt.Errorf("the CodeDNS deployment isn't ready yet")
 			}
-			err = client.AppsV1beta2().Deployments(metav1.NamespaceSystem).Delete(kubeadmconstants.KubeDNS, nil)
-			if err != nil {
-				return err
-			}
-			return nil
-		}, 5)
+			return apiclient.DeleteDeploymentForeground(client, metav1.NamespaceSystem, kubeadmconstants.KubeDNS)
+		}, 10)
 	}
 	return nil
 }
+
+func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.MasterConfiguration, newK8sVer *version.Version, dryRun bool) error {
+	if features.Enabled(cfg.FeatureGates, features.SelfHosting) && !IsControlPlaneSelfHosted(client) && newK8sVer.AtLeast(v190alpha3) {
+
+		waiter := getWaiter(dryRun, client)
+
+		// kubeadm will now convert the static Pod-hosted control plane into a self-hosted one
+		fmt.Println("[self-hosted] Creating self-hosted control plane.")
+		if err := selfhosting.CreateSelfHostedControlPlane(kubeadmconstants.GetStaticPodDirectory(), kubeadmconstants.KubernetesDir, cfg, client, waiter, dryRun); err != nil {
+			return fmt.Errorf("error creating self hosted control plane: %v", err)
+		}
+	}
+	return nil
+}
+
+// getWaiter gets the right waiter implementation for the right occasion
+// TODO: Consolidate this with what's in init.go?
+func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
+	if dryRun {
+		return dryrunutil.NewWaiter()
+	}
+	return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
+}
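
removeOldKubeDNSDeploymentIfCoreDNSIsUsed leans on apiclient.TryRunCommand so that "CoreDNS not ready yet" is retried rather than fatal; bumping the threshold from 5 to 10 simply allows more polling rounds. The helper's shape is roughly the following sketch; the wait interval and backoff here are assumptions, not the real implementation:

package main

import (
	"fmt"
	"time"
)

// tryRunCommand retries f until it succeeds or failureThreshold attempts are used up.
func tryRunCommand(f func() error, failureThreshold int) error {
	var lastErr error
	for i := 0; i < failureThreshold; i++ {
		if lastErr = f(); lastErr == nil {
			return nil
		}
		time.Sleep(time.Duration(i+1) * time.Second) // assumed backoff
	}
	return fmt.Errorf("giving up after %d attempts: %v", failureThreshold, lastErr)
}

func main() {
	attempts := 0
	err := tryRunCommand(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("the CoreDNS deployment isn't ready yet")
		}
		return nil
	}, 10)
	fmt.Println(err, "after", attempts, "attempts")
}
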
@@ -30,7 +30,9 @@ import (
 	"k8s.io/kubernetes/pkg/util/version"
 )
 
+// TODO: Maybe move these constants elsewhere in future releases
 var v190 = version.MustParseSemantic("v1.9.0")
+var v190alpha3 = version.MustParseSemantic("v1.9.0-alpha.3")
 var expiry = 180 * 24 * time.Hour
 
 // backupAPIServerCertAndKey backups the old cert and key of kube-apiserver to a specified directory.
@@ -107,6 +107,15 @@ func DeleteDaemonSetForeground(client clientset.Interface, namespace, name strin
 	return client.AppsV1beta2().DaemonSets(namespace).Delete(name, deleteOptions)
 }
 
+// DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
+func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
+	foregroundDelete := metav1.DeletePropagationForeground
+	deleteOptions := &metav1.DeleteOptions{
+		PropagationPolicy: &foregroundDelete,
+	}
+	return client.AppsV1beta2().Deployments(namespace).Delete(name, deleteOptions)
+}
+
 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.
 func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error {
 	if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(role); err != nil {
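
Foreground propagation is what makes the kube-dns to CoreDNS switchover above orderly: the Deployment's dependents (ReplicaSets, then Pods) are removed before the Deployment object itself finally disappears. A hypothetical standalone usage against a cluster, written with client-go signatures of the same era (apps/v1beta2 and the context-free Delete have since been removed from newer client-go releases):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/admin.conf")
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Foreground propagation: dependents are garbage-collected before
	// the Deployment object itself is removed.
	foreground := metav1.DeletePropagationForeground
	err = client.AppsV1beta2().Deployments("kube-system").Delete("kube-dns", &metav1.DeleteOptions{
		PropagationPolicy: &foreground,
	})
	fmt.Println(err)
}
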
@@ -80,7 +80,7 @@ func checkErr(prefix string, err error, handleErr func(string, int)) {
 func FormatErrMsg(errs []error) string {
 	var errMsg string
 	for _, err := range errs {
-		errMsg = fmt.Sprintf("%s\t-%s\n", errMsg, err.Error())
+		errMsg = fmt.Sprintf("%s\t- %s\n", errMsg, err.Error())
 	}
 	return errMsg
 }
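
With the added space, aggregated errors print as a readable bullet list. A quick self-contained check, mirroring the fixed implementation above:

package main

import "fmt"

// FormatErrMsg mirrors the fixed implementation above.
func FormatErrMsg(errs []error) string {
	var errMsg string
	for _, err := range errs {
		errMsg = fmt.Sprintf("%s\t- %s\n", errMsg, err.Error())
	}
	return errMsg
}

func main() {
	fmt.Print(FormatErrMsg([]error{
		fmt.Errorf("couldn't parse config"),
		fmt.Errorf("couldn't reach the API server"),
	}))
	// Output:
	//	- couldn't parse config
	//	- couldn't reach the API server
}
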
@@ -64,13 +64,13 @@ func TestFormatErrMsg(t *testing.T) {
 				fmt.Errorf(errMsg1),
 				fmt.Errorf(errMsg2),
 			},
-			expect: "\t-" + errMsg1 + "\n" + "\t-" + errMsg2 + "\n",
+			expect: "\t- " + errMsg1 + "\n" + "\t- " + errMsg2 + "\n",
 		},
 		{
 			errs: []error{
 				fmt.Errorf(errMsg1),
 			},
-			expect: "\t-" + errMsg1 + "\n",
+			expect: "\t- " + errMsg1 + "\n",
 		},
 	}