mirror of https://github.com/k3s-io/k3s
Merge pull request #19513 from ihmccreery/sleep-tunnels
Add 5-minute sleep in GKE for dead tunnels to be removed

commit 6cbedf2f0b
@@ -68,6 +68,17 @@ var _ = Describe("Reboot [Disruptive]", func() {
 				Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
+		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
+		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests
+		// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
+		// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
+		// was recently rebooted. There's no good way to poll for proxies being closed, so we sleep.
+		//
+		// TODO(cjcullen) reduce this sleep (#19314)
+		if providerIs("gke") {
+			By("waiting 5 minutes for all dead tunnels to be dropped")
+			time.Sleep(5 * time.Minute)
+		}
 	})

 	f = NewFramework("reboot")

@@ -422,6 +422,18 @@ var _ = Describe("Nodes [Disruptive]", func() {
 		if err := resizeGroup(testContext.CloudConfig.NumNodes); err != nil {
 			Failf("Couldn't restore the original node instance group size: %v", err)
 		}
+		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
+		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
+		// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
+		// right before a test that tries to get logs, for example, we may get unlucky and try to use a
+		// closed tunnel to a node that was recently rebooted. There's no good way to poll for proxies
+		// being closed, so we sleep.
+		//
+		// TODO(cjcullen) reduce this sleep (#19314)
+		if providerIs("gke") {
+			By("waiting 5 minutes for all dead tunnels to be dropped")
+			time.Sleep(5 * time.Minute)
+		}
 		if err := waitForGroupSize(testContext.CloudConfig.NumNodes); err != nil {
 			Failf("Couldn't restore the original node instance group size: %v", err)
 		}
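
The same wait block is duplicated in both test files. Purely as an illustration (not part of this commit), a minimal sketch of how it could be pulled into a shared helper follows; the helper name waitForDeadTunnels and the constant tunnelRecreationWindow are hypothetical, and the sketch assumes the providerIs and By utilities shown in the diff live in the same package:

package e2e

import "time"

// tunnelRecreationWindow mirrors the 5-minute window described in the
// diff comments: in GKE, a broken tunnel to a rebooted/deleted node can
// linger this long before all tunnels are dropped and recreated.
const tunnelRecreationWindow = 5 * time.Minute

// waitForDeadTunnels is a hypothetical helper that the reboot and resize
// tests could call instead of repeating the inline sleep block. On
// providers other than GKE it returns immediately.
func waitForDeadTunnels() {
	if providerIs("gke") {
		By("waiting 5 minutes for all dead tunnels to be dropped")
		time.Sleep(tunnelRecreationWindow)
	}
}

With a helper like this, each AfterEach would end with a single waitForDeadTunnels() call rather than the duplicated comment and sleep.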