mirror of https://github.com/k3s-io/k3s
Merge pull request #49712 from ironcladlou/gc-e2e-timeout-fix
Automatic merge from submit-queue (batch tested with PRs 49712, 49694, 49714, 49670, 49717)

Reduce GC e2e test flakiness

Increase the GC wait timeout in a flaky e2e test. The test expects a GC operation to complete within 30s, while in practice the operation often takes longer because of the delay between the enqueueing of the owner's delete operation and the GC's actual processing of that event. Doubling the timeout seems to stabilize the test. The test's assumptions can be revisited, and the processing delay under load investigated, in follow-up work.

Extracted from https://github.com/kubernetes/kubernetes/pull/47665 per https://github.com/kubernetes/kubernetes/pull/47665#issuecomment-318219099.

/cc @sttts @caesarxuchao @deads2k @kubernetes/sig-api-machinery-bugs

```release-note
NONE
```
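For context on where the original 30s budget came from, here is a back-of-the-envelope sketch (not part of the PR). The figures for client QPS and requests per pod come from the comment removed in the diff below; the pod count is purely illustrative.

```go
package main

import "fmt"

func main() {
	// Figures from the comment removed in the diff below: the default e2e
	// client QPS is 20 and deleting each pod costs roughly 2 API requests.
	// The pod count is a hypothetical example, not the test's actual number.
	const (
		clientQPS      = 20.0
		requestsPerPod = 2.0
		podCount       = 100
	)
	throttleSeconds := podCount * requestsPerPod / clientQPS
	fmt.Printf("client-side throttling alone: ~%.0fs for %d pods\n", throttleSeconds, podCount)
	// Even with this margin inside 30s, the test still flaked because of the
	// gap between enqueueing the owner's delete and the GC processing it,
	// hence the doubled timeout rather than a tighter request budget.
}
```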
commit 46e159219e
```diff
@@ -535,7 +535,11 @@ var _ = SIGDescribe("Garbage collector", func() {
 	}
 	By("wait for the rc to be deleted")
-	// default client QPS is 20, deleting each pod requires 2 requests, so 30s should be enough
-	if err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
+	// TODO: 30s is enough assuming immediate processing of dependents following
+	// owner deletion, but in practice there can be a long delay between owner
+	// deletion and dependent deletion processing. For now, increase the timeout
+	// and investigate the processing delay.
+	if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
 		_, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 		if err == nil {
 			pods, _ := podClient.List(metav1.ListOptions{})
```
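Below is a minimal, self-contained sketch of the polling pattern this diff adjusts, assuming only k8s.io/apimachinery/pkg/util/wait. The function checkOwnerDeleted is a hypothetical stand-in for the test's rcClient/podClient lookups, whose condition body is truncated in the diff above.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkOwnerDeleted is a hypothetical stand-in for the e2e test's condition:
// it should report true once the owner object (the RC) is gone and its
// dependents have been cleaned up.
func checkOwnerDeleted() (bool, error) {
	// ... query the API server via rcClient/podClient here ...
	return true, nil
}

func main() {
	// Poll once per second, giving up after 60s: double the original 30s
	// budget, to absorb the delay between the owner's delete being enqueued
	// and the garbage collector actually processing that event.
	if err := wait.Poll(1*time.Second, 60*time.Second, checkOwnerDeleted); err != nil {
		fmt.Printf("timed out waiting for owner deletion: %v\n", err)
	}
}
```

In the real test, the visible portion of the condition lists the remaining pods when the RC still exists, presumably to report what was left behind; the rest of that body is cut off in the diff above.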