mirror of https://github.com/k3s-io/k3s
Merge pull request #70055 from bart0sh/PR0034-kubeadm-upgrade-misc-fixes
2 minor fixes for kubeadm upgrade
commit db4c86f560

Two small fixes for kubeadm upgrade: the etcd image is now prepulled only when kubeadm itself manages etcd (no external etcd configured), which required PrepullImagesInParallel to take the component list from its caller; and a missing closing parenthesis is restored in a progress message printed by upgradeComponent.
cmd/kubeadm/app/cmd/upgrade/apply.go

@@ -194,7 +194,11 @@ func RunApply(flags *applyFlags) error {
 	// and block until all DaemonSets are ready; then we know for sure that all control plane images are cached locally
 	glog.V(1).Infof("[upgrade/apply] creating prepuller")
 	prepuller := upgrade.NewDaemonSetPrepuller(upgradeVars.client, upgradeVars.waiter, &upgradeVars.cfg.ClusterConfiguration)
-	if err := upgrade.PrepullImagesInParallel(prepuller, flags.imagePullTimeout); err != nil {
+	componentsToPrepull := constants.MasterComponents
+	if upgradeVars.cfg.Etcd.External == nil {
+		componentsToPrepull = append(componentsToPrepull, constants.Etcd)
+	}
+	if err := upgrade.PrepullImagesInParallel(prepuller, flags.imagePullTimeout, componentsToPrepull); err != nil {
 		return fmt.Errorf("[upgrade/prepull] Failed prepulled the images for the control plane components error: %v", err)
 	}
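For context, a minimal sketch of the pattern this hunk introduces: the call site, not PrepullImagesInParallel, now decides which component images to prepull, and appends etcd only for stacked-etcd clusters. The types and component names below are illustrative stand-ins, not kubeadm's real API.

package main

import "fmt"

// Illustrative stand-ins for kubeadm's constants and Etcd config (assumptions).
var masterComponents = []string{"kube-apiserver", "kube-controller-manager", "kube-scheduler"}

type externalEtcd struct{ Endpoints []string }
type etcd struct{ External *externalEtcd }

// componentsToPrepull mirrors the hunk's logic: etcd is included only when no
// external etcd is configured, i.e. etcd runs as a kubeadm-managed static pod.
func componentsToPrepull(e etcd) []string {
	components := masterComponents
	if e.External == nil {
		components = append(components, "etcd")
	}
	return components
}

func main() {
	fmt.Println(componentsToPrepull(etcd{}))                          // stacked etcd: control plane + etcd
	fmt.Println(componentsToPrepull(etcd{External: &externalEtcd{}})) // external etcd: control plane only
}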
cmd/kubeadm/app/phases/upgrade/prepull.go

@@ -91,8 +91,7 @@ func (d *DaemonSetPrepuller) DeleteFunc(component string) error {
 }
 
 // PrepullImagesInParallel creates DaemonSets synchronously but waits in parallel for the images to pull
-func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration) error {
-	componentsToPrepull := append(constants.MasterComponents, constants.Etcd)
+func PrepullImagesInParallel(kubePrepuller Prepuller, timeout time.Duration, componentsToPrepull []string) error {
 	fmt.Printf("[upgrade/prepull] Will prepull images for components %v\n", componentsToPrepull)
 
 	timeoutChan := time.After(timeout)
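The comment above PrepullImagesInParallel describes the function's shape: create the prepull DaemonSets one by one, then wait for all of them concurrently under a single timeout. Below is a minimal sketch of that shape against a simplified Prepuller interface; the interface and the fake are assumptions for illustration, and the real kubeadm version also deletes the DaemonSets once the images are pulled.

package main

import (
	"fmt"
	"time"
)

// prepuller is a simplified stand-in for kubeadm's Prepuller interface (assumption).
type prepuller interface {
	CreateFunc(component string) error
	WaitFunc(component string)
}

// prepullInParallel creates one prepull workload per component synchronously,
// then waits for all of them in parallel, failing once the timeout elapses.
func prepullInParallel(p prepuller, timeout time.Duration, components []string) error {
	// Create the workloads one by one.
	for _, c := range components {
		if err := p.CreateFunc(c); err != nil {
			return err
		}
	}
	// Wait for every component concurrently, reporting each on a channel.
	done := make(chan string, len(components))
	for _, c := range components {
		go func(c string) {
			p.WaitFunc(c)
			done <- c
		}(c)
	}
	timeoutChan := time.After(timeout)
	for range components {
		select {
		case c := <-done:
			fmt.Printf("prepulled images for %s\n", c)
		case <-timeoutChan:
			return fmt.Errorf("timed out waiting for prepull after %v", timeout)
		}
	}
	return nil
}

// fakePrepuller simulates image pulls that each take a fixed time.
type fakePrepuller struct{ pullTime time.Duration }

func (f fakePrepuller) CreateFunc(component string) error { return nil }
func (f fakePrepuller) WaitFunc(component string)         { time.Sleep(f.pullTime) }

func main() {
	err := prepullInParallel(fakePrepuller{pullTime: 10 * time.Millisecond},
		time.Second, []string{"kube-apiserver", "etcd"})
	fmt.Println("err:", err) // err: <nil>
}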
cmd/kubeadm/app/phases/upgrade/prepull_test.go

@@ -20,6 +20,8 @@ import (
 	"fmt"
 	"testing"
 	"time"
+
+	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	//"k8s.io/apimachinery/pkg/util/version"
 )
@@ -133,7 +135,7 @@ func TestPrepullImagesInParallel(t *testing.T) {
 
 	for _, rt := range tests {
-		actualErr := PrepullImagesInParallel(rt.p, rt.timeout)
+		actualErr := PrepullImagesInParallel(rt.p, rt.timeout, append(constants.MasterComponents, constants.Etcd))
 		if (actualErr != nil) != rt.expectedErr {
 			t.Errorf(
 				"failed TestPrepullImagesInParallel\n\texpected error: %t\n\tgot: %t",
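One detail in the updated test call deserves a note: append(constants.MasterComponents, constants.Etcd) rebuilds the old built-in default at the call site. A short, hedged aside on why this particular append is safe (the slice contents below are an assumption):

package main

import "fmt"

// Stand-in for constants.MasterComponents (assumption about its contents).
var masterComponents = []string{"kube-apiserver", "kube-controller-manager", "kube-scheduler"}

func main() {
	// A slice literal has len == cap, so this append must allocate a fresh
	// backing array; the shared package-level default is not mutated.
	// Had the slice carried spare capacity, append could write in place.
	all := append(masterComponents, "etcd")
	fmt.Println(all)              // [kube-apiserver kube-controller-manager kube-scheduler etcd]
	fmt.Println(masterComponents) // unchanged
}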
cmd/kubeadm/app/phases/upgrade/staticpods.go

@@ -226,7 +226,7 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP
 
 	if waitForComponentRestart {
 		fmt.Println("[upgrade/staticpods] Waiting for the kubelet to restart the component")
-		fmt.Printf("[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout %v\n", UpgradeManifestTimeout)
+		fmt.Printf("[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout %v)\n", UpgradeManifestTimeout)
 
 		// Wait for the mirror Pod hash to change; otherwise we'll run into race conditions here when the kubelet hasn't had time to
 		// notice the removal of the Static Pod, leading to a false positive below where we check that the API endpoint is healthy
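The second fix is purely cosmetic: the closing parenthesis was missing from the user-facing message, so the printed timeout was never closed off. A quick illustration of the corrected output, using an assumed five-minute value for UpgradeManifestTimeout:

package main

import (
	"fmt"
	"time"
)

func main() {
	upgradeManifestTimeout := 5 * time.Minute // illustrative value, not necessarily kubeadm's constant
	fmt.Printf("[upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout %v)\n", upgradeManifestTimeout)
	// prints: [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s)
}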