mirror of https://github.com/k3s-io/k3s
Merge pull request #73844 from rojkov/kubeadm-restructure-upgradeVariables
kubeadm: restructure upgradeVariables (pull/564/head)
commit 08d0522730
@@ -151,37 +151,37 @@ func runApply(flags *applyFlags) error {
 	// Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap)
 	klog.V(1).Infof("[upgrade/apply] verifying health of cluster")
 	klog.V(1).Infof("[upgrade/apply] retrieving configuration from cluster")
-	upgradeVars, err := enforceRequirements(flags.applyPlanFlags, flags.dryRun, flags.newK8sVersionStr)
+	client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, flags.dryRun, flags.newK8sVersionStr)
 	if err != nil {
 		return err
 	}
 
 	if len(flags.criSocket) != 0 {
 		fmt.Println("[upgrade/apply] Respecting the --cri-socket flag that is set with higher priority than the config file.")
-		upgradeVars.cfg.NodeRegistration.CRISocket = flags.criSocket
+		cfg.NodeRegistration.CRISocket = flags.criSocket
 	}
 
 	// Validate requested and validate actual version
 	klog.V(1).Infof("[upgrade/apply] validating requested and actual version")
-	if err := configutil.NormalizeKubernetesVersion(&upgradeVars.cfg.ClusterConfiguration); err != nil {
+	if err := configutil.NormalizeKubernetesVersion(&cfg.ClusterConfiguration); err != nil {
 		return err
 	}
 
 	// Use normalized version string in all following code.
-	flags.newK8sVersionStr = upgradeVars.cfg.KubernetesVersion
+	flags.newK8sVersionStr = cfg.KubernetesVersion
 	k8sVer, err := version.ParseSemantic(flags.newK8sVersionStr)
 	if err != nil {
 		return errors.Errorf("unable to parse normalized version %q as a semantic version", flags.newK8sVersionStr)
 	}
 	flags.newK8sVersion = k8sVer
 
-	if err := features.ValidateVersion(features.InitFeatureGates, upgradeVars.cfg.FeatureGates, upgradeVars.cfg.KubernetesVersion); err != nil {
+	if err := features.ValidateVersion(features.InitFeatureGates, cfg.FeatureGates, cfg.KubernetesVersion); err != nil {
 		return err
 	}
 
 	// Enforce the version skew policies
 	klog.V(1).Infof("[upgrade/version] enforcing version skew policies")
-	if err := EnforceVersionPolicies(flags, upgradeVars.versionGetter); err != nil {
+	if err := EnforceVersionPolicies(flags, versionGetter); err != nil {
 		return errors.Wrap(err, "[upgrade/version] FATAL")
 	}
 
@@ -192,12 +192,14 @@ func runApply(flags *applyFlags) error {
 		}
 	}
 
+	waiter := getWaiter(flags.dryRun, client)
+
 	// Use a prepuller implementation based on creating DaemonSets
 	// and block until all DaemonSets are ready; then we know for sure that all control plane images are cached locally
 	klog.V(1).Infof("[upgrade/apply] creating prepuller")
-	prepuller := upgrade.NewDaemonSetPrepuller(upgradeVars.client, upgradeVars.waiter, &upgradeVars.cfg.ClusterConfiguration)
+	prepuller := upgrade.NewDaemonSetPrepuller(client, waiter, &cfg.ClusterConfiguration)
 	componentsToPrepull := constants.MasterComponents
-	if upgradeVars.cfg.Etcd.External == nil && flags.etcdUpgrade {
+	if cfg.Etcd.External == nil && flags.etcdUpgrade {
 		componentsToPrepull = append(componentsToPrepull, constants.Etcd)
 	}
 	if err := upgrade.PrepullImagesInParallel(prepuller, flags.imagePullTimeout, componentsToPrepull); err != nil {
@@ -206,13 +208,13 @@ func runApply(flags *applyFlags) error {
 
 	// Now; perform the upgrade procedure
 	klog.V(1).Infof("[upgrade/apply] performing upgrade")
-	if err := PerformControlPlaneUpgrade(flags, upgradeVars.client, upgradeVars.waiter, upgradeVars.cfg); err != nil {
+	if err := PerformControlPlaneUpgrade(flags, client, waiter, cfg); err != nil {
 		return errors.Wrap(err, "[upgrade/apply] FATAL")
 	}
 
 	// Upgrade RBAC rules and addons.
 	klog.V(1).Infof("[upgrade/postupgrade] upgrading RBAC rules and addons")
-	if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, upgradeVars.cfg, flags.newK8sVersion, flags.dryRun); err != nil {
+	if err := upgrade.PerformPostUpgradeTasks(client, cfg, flags.newK8sVersion, flags.dryRun); err != nil {
 		return errors.Wrap(err, "[upgrade/postupgrade] FATAL post-upgrade error")
 	}
 
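The second apply.go hunk above now builds the waiter inline with waiter := getWaiter(flags.dryRun, client) instead of receiving it through upgradeVariables. Below is a minimal, self-contained Go sketch of that dry-run-aware selection pattern; the Waiter interface and the apiWaiter, dryRunWaiter and getWaiterSketch names are hypothetical stand-ins, not kubeadm's real apiclient types.

package main

import (
	"fmt"
	"time"
)

// Waiter abstracts "wait until the control plane is ready"; kubeadm's real
// interface has more methods, this is a simplified stand-in.
type Waiter interface {
	WaitForAPI() error
}

// apiWaiter stands in for a waiter that polls a live API server.
type apiWaiter struct{ timeout time.Duration }

func (w apiWaiter) WaitForAPI() error {
	fmt.Printf("polling the API server for up to %s\n", w.timeout)
	return nil
}

// dryRunWaiter only prints what it would have waited for.
type dryRunWaiter struct{}

func (dryRunWaiter) WaitForAPI() error {
	fmt.Println("[dryrun] would wait for the API server to come up")
	return nil
}

// getWaiterSketch mirrors the role of kubeadm's getWaiter: pick the waiter
// implementation based on the --dry-run flag.
func getWaiterSketch(dryRun bool) Waiter {
	if dryRun {
		return dryRunWaiter{}
	}
	return apiWaiter{timeout: 5 * time.Minute}
}

func main() {
	w := getWaiterSketch(true)
	_ = w.WaitForAPI()
}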
@@ -42,31 +42,22 @@ import (
 	kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
 )
 
-// upgradeVariables holds variables needed for performing an upgrade or planning to do so
-// TODO - Restructure or rename upgradeVariables
-type upgradeVariables struct {
-	client        clientset.Interface
-	cfg           *kubeadmapi.InitConfiguration
-	versionGetter upgrade.VersionGetter
-	waiter        apiclient.Waiter
-}
-
 // enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure
-func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion string) (*upgradeVariables, error) {
+func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion string) (clientset.Interface, upgrade.VersionGetter, *kubeadmapi.InitConfiguration, error) {
 
 	client, err := getClient(flags.kubeConfigPath, dryRun)
 	if err != nil {
-		return nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath)
+		return nil, nil, nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath)
 	}
 
 	// Check if the cluster is self-hosted
 	if upgrade.IsControlPlaneSelfHosted(client) {
-		return nil, errors.Errorf("cannot upgrade a self-hosted control plane")
+		return nil, nil, nil, errors.Errorf("cannot upgrade a self-hosted control plane")
 	}
 
 	// Run healthchecks against the cluster
 	if err := upgrade.CheckClusterHealth(client, flags.ignorePreflightErrorsSet); err != nil {
-		return nil, errors.Wrap(err, "[upgrade/health] FATAL")
+		return nil, nil, nil, errors.Wrap(err, "[upgrade/health] FATAL")
 	}
 
 	// Fetch the configuration from a file or ConfigMap and validate it
@@ -91,7 +82,7 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
 			fmt.Println("")
 			err = errors.Errorf("the ConfigMap %q in the %s namespace used for getting configuration information was not found", constants.KubeadmConfigConfigMap, metav1.NamespaceSystem)
 		}
-		return nil, errors.Wrap(err, "[upgrade/config] FATAL")
+		return nil, nil, nil, errors.Wrap(err, "[upgrade/config] FATAL")
 	}
 
 	// If a new k8s version should be set, apply the change before printing the config
@@ -103,7 +94,7 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
 	if flags.featureGatesString != "" {
 		cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, flags.featureGatesString)
 		if err != nil {
-			return nil, errors.Wrap(err, "[upgrade/config] FATAL")
+			return nil, nil, nil, errors.Wrap(err, "[upgrade/config] FATAL")
 		}
 	}
 
@@ -112,7 +103,7 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
 		for _, m := range msg {
 			fmt.Printf("[upgrade/config] %s\n", m)
 		}
-		return nil, errors.New("[upgrade/config] FATAL. Unable to upgrade a cluster using deprecated feature-gate flags. Please see the release notes")
+		return nil, nil, nil, errors.New("[upgrade/config] FATAL. Unable to upgrade a cluster using deprecated feature-gate flags. Please see the release notes")
 	}
 
 	// If the user told us to print this information out; do it!
@@ -120,14 +111,8 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
 		printConfiguration(&cfg.ClusterConfiguration, os.Stdout)
 	}
 
-	return &upgradeVariables{
-		client: client,
-		cfg: cfg,
-		// Use a real version getter interface that queries the API server, the kubeadm client and the Kubernetes CI system for latest versions
-		versionGetter: upgrade.NewOfflineVersionGetter(upgrade.NewKubeVersionGetter(client, os.Stdout), newK8sVersion),
-		// Use the waiter conditionally based on the dryrunning variable
-		waiter: getWaiter(dryRun, client),
-	}, nil
+	// Use a real version getter interface that queries the API server, the kubeadm client and the Kubernetes CI system for latest versions
+	return client, upgrade.NewOfflineVersionGetter(upgrade.NewKubeVersionGetter(client, os.Stdout), cfg.KubernetesVersion), cfg, nil
 }
 
 // printConfiguration prints the external version of the API to yaml
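Taken together, the common.go hunks above replace the upgradeVariables holder struct with plain multiple return values from enforceRequirements. A minimal sketch of that refactoring pattern follows; setupVars, setupWithStruct and setupWithValues are hypothetical names used only for illustration, not kubeadm's real types.

package main

import "fmt"

// Before: results bundled into a holder struct.
type setupVars struct {
	client  string
	version string
	cfg     string
}

func setupWithStruct() (*setupVars, error) {
	return &setupVars{client: "client", version: "v1.14.0", cfg: "cfg"}, nil
}

// After: the same values returned directly, so callers name exactly what they
// use and no struct literal is needed on the success path.
func setupWithValues() (client string, version string, cfg string, err error) {
	return "client", "v1.14.0", "cfg", nil
}

func main() {
	vars, err := setupWithStruct()
	if err != nil {
		panic(err)
	}
	fmt.Println(vars.client, vars.cfg)

	client, _, cfg, err := setupWithValues()
	if err != nil {
		panic(err)
	}
	fmt.Println(client, cfg)
}

The trade-off is visible in the hunks themselves: callers get the values without an extra indirection, but every early return now has to spell out nil, nil, nil for the unused results.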
@@ -88,7 +88,7 @@ func runPlan(flags *planFlags) error {
 	// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning.
 	klog.V(1).Infof("[upgrade/plan] verifying health of cluster")
 	klog.V(1).Infof("[upgrade/plan] retrieving configuration from cluster")
-	upgradeVars, err := enforceRequirements(flags.applyPlanFlags, false, flags.newK8sVersionStr)
+	client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, false, flags.newK8sVersionStr)
 	if err != nil {
 		return err
 	}
@@ -97,20 +97,20 @@ func runPlan(flags *planFlags) error {
 
 	// Currently this is the only method we have for distinguishing
 	// external etcd vs static pod etcd
-	isExternalEtcd := upgradeVars.cfg.Etcd.External != nil
+	isExternalEtcd := cfg.Etcd.External != nil
 	if isExternalEtcd {
 		client, err := etcdutil.New(
-			upgradeVars.cfg.Etcd.External.Endpoints,
-			upgradeVars.cfg.Etcd.External.CAFile,
-			upgradeVars.cfg.Etcd.External.CertFile,
-			upgradeVars.cfg.Etcd.External.KeyFile)
+			cfg.Etcd.External.Endpoints,
+			cfg.Etcd.External.CAFile,
+			cfg.Etcd.External.CertFile,
+			cfg.Etcd.External.KeyFile)
 		if err != nil {
 			return err
 		}
 		etcdClient = client
 	} else {
 		// Connects to local/stacked etcd existing in the cluster
-		client, err := etcdutil.NewFromCluster(upgradeVars.client, upgradeVars.cfg.CertificatesDir)
+		client, err := etcdutil.NewFromCluster(client, cfg.CertificatesDir)
 		if err != nil {
 			return err
 		}
@@ -119,7 +119,7 @@ func runPlan(flags *planFlags) error {
 
 	// Compute which upgrade possibilities there are
 	klog.V(1).Infof("[upgrade/plan] computing upgrade possibilities")
-	availUpgrades, err := upgrade.GetAvailableUpgrades(upgradeVars.versionGetter, flags.allowExperimentalUpgrades, flags.allowRCUpgrades, etcdClient, upgradeVars.cfg.DNS.Type, upgradeVars.client)
+	availUpgrades, err := upgrade.GetAvailableUpgrades(versionGetter, flags.allowExperimentalUpgrades, flags.allowRCUpgrades, etcdClient, cfg.DNS.Type, client)
 	if err != nil {
 		return errors.Wrap(err, "[upgrade/versions] FATAL")
 	}
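The runPlan hunks branch on cfg.Etcd.External to decide how to reach etcd: dial the user-supplied endpoints and TLS files for external etcd, or build a client from the cluster's static-pod (stacked) etcd otherwise. The sketch below mirrors only that control flow under stated assumptions; externalEtcd, newExternalEtcdClient, newStackedEtcdClient and buildEtcdClient are simplified stand-ins for kubeadm's cfg.Etcd.External, etcdutil.New and etcdutil.NewFromCluster, not their real implementations.

package main

import (
	"errors"
	"fmt"
)

// externalEtcd mirrors the fields read from cfg.Etcd.External in the diff.
type externalEtcd struct {
	Endpoints                 []string
	CAFile, CertFile, KeyFile string
}

type etcdClient struct{ endpoints []string }

// newExternalEtcdClient is a stand-in for etcdutil.New: dial user-provided endpoints.
func newExternalEtcdClient(e *externalEtcd) (*etcdClient, error) {
	if len(e.Endpoints) == 0 {
		return nil, errors.New("no external etcd endpoints configured")
	}
	return &etcdClient{endpoints: e.Endpoints}, nil
}

// newStackedEtcdClient is a stand-in for etcdutil.NewFromCluster: discover the
// etcd static pods through the cluster and use the certificates in certDir.
func newStackedEtcdClient(certDir string) (*etcdClient, error) {
	return &etcdClient{endpoints: []string{"https://127.0.0.1:2379"}}, nil
}

func buildEtcdClient(external *externalEtcd, certDir string) (*etcdClient, error) {
	// As the comment in the hunk notes, the presence of cfg.Etcd.External is
	// currently the only way to distinguish external etcd from static-pod etcd.
	if external != nil {
		return newExternalEtcdClient(external)
	}
	return newStackedEtcdClient(certDir)
}

func main() {
	c, err := buildEtcdClient(nil, "/etc/kubernetes/pki")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.endpoints)
}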