Merge pull request #34462 from yujuhong/ignore_pods

Automatic merge from submit-queue

Ignore mirror pods with RestartPolicy == Never in restart tests

Kubelet does not sync mirror pods once they have terminated. If, for some reason, such mirror pods get deleted after they have terminated (either by the node controller or by users), kubelet will not attempt to recreate them. However, when kubelet restarts, it will examine the static pods, sync once, and create a mirror pod. This has led to unexpected pod counts in disruptive tests where kubelet gets restarted on purpose (see #34003). This change disregards such mirror pods when counting pods, to fix the test flake until there is time to implement a long-term solution.

This PR addresses #34003
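For background: a mirror pod is the kubelet-created, API-server-visible copy of a static pod, marked with the kubernetes.io/config.mirror annotation (this annotation is what kubepod.IsMirrorPod inspects). Below is a minimal standalone sketch of the filtering rule described above, using a stand-in Pod type rather than the real k8s.io/kubernetes/pkg/api types; names other than the annotation key are illustrative only.

package main

import "fmt"

// Pod is a stand-in for illustration; the real test operates on
// *api.Pod from k8s.io/kubernetes/pkg/api.
type Pod struct {
	Name          string
	Annotations   map[string]string
	RestartPolicy string
}

// ConfigMirrorAnnotationKey is the annotation the kubelet sets on the
// API-server copy of a static pod; kubepod.IsMirrorPod checks for it.
const ConfigMirrorAnnotationKey = "kubernetes.io/config.mirror"

func isMirrorPod(p *Pod) bool {
	_, ok := p.Annotations[ConfigMirrorAnnotationKey]
	return ok
}

// filterIrrelevantPods drops mirror pods with RestartPolicy == Never,
// the same rule this change applies: such pods are not recreated after
// deletion, so they should not count toward expected pod totals.
func filterIrrelevantPods(pods []*Pod) []*Pod {
	var results []*Pod
	for _, p := range pods {
		if isMirrorPod(p) && p.RestartPolicy == "Never" {
			continue
		}
		results = append(results, p)
	}
	return results
}

func main() {
	pods := []*Pod{
		{Name: "regular-pod", RestartPolicy: "Always"},
		{Name: "static-pod-mirror", RestartPolicy: "Never",
			Annotations: map[string]string{ConfigMirrorAnnotationKey: "mirror-hash"}},
	}
	for _, p := range filterIrrelevantPods(pods) {
		fmt.Println(p.Name) // prints only "regular-pod"
	}
}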
commit 71249bb82b
@@ -22,6 +22,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/fields"
+	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -30,6 +31,28 @@ import (
 	. "github.com/onsi/gomega"
 )
 
+func isRestartNeverMirrorPod(p *api.Pod) bool {
+	if !kubepod.IsMirrorPod(p) {
+		return false
+	}
+	return p.Spec.RestartPolicy == api.RestartPolicyNever
+}
+
+func filterIrrelevantPods(pods []*api.Pod) []*api.Pod {
+	var results []*api.Pod
+	for _, p := range pods {
+		if isRestartNeverMirrorPod(p) {
+			// Mirror pods with restart policy == Never will not get
+			// recreated if they are deleted after the pods have
+			// terminated. For now, we discount such pods.
+			// https://github.com/kubernetes/kubernetes/issues/34003
+			continue
+		}
+		results = append(results, p)
+	}
+	return results
+}
+
 var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 	f := framework.NewDefaultFramework("restart")
 	var ps *framework.PodStore
@@ -57,7 +80,9 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
 	framework.Logf("Got the following nodes before restart: %v", nodeNamesBefore)
 
 	By("ensuring all pods are running and ready")
-	pods := ps.List()
+	allPods := ps.List()
+	pods := filterIrrelevantPods(allPods)
+
 	podNamesBefore := make([]string, len(pods))
 	for i, p := range pods {
 		podNamesBefore[i] = p.ObjectMeta.Name
@@ -105,7 +130,8 @@ func waitForNPods(ps *framework.PodStore, expect int, timeout time.Duration) ([]
 	var pods []*api.Pod
 	var errLast error
 	found := wait.Poll(framework.Poll, timeout, func() (bool, error) {
-		pods = ps.List()
+		allPods := ps.List()
+		pods = filterIrrelevantPods(allPods)
 		if len(pods) != expect {
 			errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
 			framework.Logf("Error getting pods: %v", errLast)