mirror of https://github.com/k3s-io/k3s
Merge pull request #28521 from Random-Liu/fix-flake-pod-test
Automatic merge from submit-queue

E2E: Add UpdatePod function in e2e framework and change the tests to use it.

Fixes https://github.com/kubernetes/kubernetes/issues/28096.

Some e2e tests need to update a pod, but a pod update is more involved than it looks because of potential resource-version conflicts; #28096 happened precisely because the test called pod `Update` only once. This PR moves the pod-update logic into a util function `UpdatePod` in the e2e framework and changes the tests to use it.

Marked P2 because the original issue is P0, but in practice it does not happen very frequently. :)
commit 6de30e64d3
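The pattern at the heart of this change is optimistic concurrency: every object carries a resource version, the apiserver rejects an Update whose version is stale, and the writer is expected to re-get the object and re-apply its change. Below is a minimal self-contained sketch of that contract (plain Go against a toy in-memory store, not kubernetes API code), showing why a single `Update` call can flake and how the retry loop fixes it:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// Pod is a toy stand-in for api.Pod: labels plus a resource version.
type Pod struct {
	Labels          map[string]string
	ResourceVersion int
}

var ErrConflict = errors.New("conflict: stale resource version")

// Store mimics the apiserver's optimistic concurrency: an Update carrying a
// stale ResourceVersion is rejected, which is the failure mode behind #28096.
type Store struct {
	mu  sync.Mutex
	pod Pod
}

// Get returns a deep copy so callers can mutate it freely.
func (s *Store) Get() Pod {
	s.mu.Lock()
	defer s.mu.Unlock()
	labels := make(map[string]string, len(s.pod.Labels))
	for k, v := range s.pod.Labels {
		labels[k] = v
	}
	return Pod{Labels: labels, ResourceVersion: s.pod.ResourceVersion}
}

func (s *Store) Update(p Pod) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if p.ResourceVersion != s.pod.ResourceVersion {
		return ErrConflict
	}
	p.ResourceVersion++
	s.pod = p
	return nil
}

// UpdatePod mirrors the shape of the new framework helper: re-get the object
// and re-apply updateFn until the write succeeds or a non-conflict error occurs.
func UpdatePod(s *Store, updateFn func(*Pod)) error {
	for attempt := 0; attempt < 10; attempt++ {
		pod := s.Get()
		updateFn(&pod)
		err := s.Update(pod)
		if err == nil {
			return nil
		}
		if !errors.Is(err, ErrConflict) {
			return err
		}
		// Someone else (e.g. the kubelet writing status) won the race; retry.
	}
	return fmt.Errorf("gave up after too many conflicts")
}

func main() {
	s := &Store{pod: Pod{Labels: map[string]string{}}}
	if err := UpdatePod(s, func(p *Pod) { p.Labels["key3"] = "value3" }); err != nil {
		panic(err)
	}
	fmt.Println(s.Get().Labels) // map[key3:value3]
}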
@@ -75,25 +75,20 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod.Name, f.Namespace.Name))
 
-		pod, err = f.Client.Pods(f.Namespace.Name).Get(pod.Name)
-		Expect(err).NotTo(HaveOccurred())
-
 		Eventually(func() (string, error) {
-			return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
+			return framework.GetPodLogs(f.Client, f.Namespace.Name, podName, containerName)
 		},
 			podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
 
 		//modify labels
-		pod.Labels["key3"] = "value3"
-		pod.ResourceVersion = "" // to force update
-		_, err = f.Client.Pods(f.Namespace.Name).Update(pod)
-		Expect(err).NotTo(HaveOccurred())
+		f.UpdatePod(podName, func(pod *api.Pod) {
+			pod.Labels["key3"] = "value3"
+		})
 
 		Eventually(func() (string, error) {
 			return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
 		},
 			podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
 
 	})
 
 	It("should update annotations on modification [Conformance]", func() {
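The substrings the test waits for (key1="value1", key3="value3") are the downward API volume's on-disk format: one key="value" line per label, which the test container writes to its log. A sketch of that serialization, with the format inferred from the expected substrings rather than taken from kubernetes source:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// formatLabels renders labels the way the downward API volume file does
// (assumed format: one key="value" line per label, sorted for stability).
func formatLabels(labels map[string]string) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	for _, k := range keys {
		fmt.Fprintf(&b, "%s=%q\n", k, labels[k])
	}
	return b.String()
}

func main() {
	fmt.Print(formatLabels(map[string]string{"key1": "value1", "key3": "value3"}))
	// key1="value1"
	// key3="value3"
}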
@@ -121,16 +116,14 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
 			podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
 
 		//modify annotations
-		pod.Annotations["builder"] = "foo"
-		pod.ResourceVersion = "" // to force update
-		_, err = f.Client.Pods(f.Namespace.Name).Update(pod)
-		Expect(err).NotTo(HaveOccurred())
+		f.UpdatePod(podName, func(pod *api.Pod) {
+			pod.Annotations["builder"] = "foo"
+		})
 
 		Eventually(func() (string, error) {
 			return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
 		},
 			podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
 
 	})
 
 	It("should provide container's cpu limit", func() {
@@ -17,10 +17,14 @@ limitations under the License.
 package framework
 
 import (
+	"fmt"
 	"sync"
+	"time"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/util/wait"
 
 	. "github.com/onsi/gomega"
 )
@@ -70,3 +74,26 @@ func (f *Framework) MungePodSpec(pod *api.Pod) {
 		pod.Spec.NodeName = TestContext.NodeName
 	}
 }
+
+// UpdatePod updates the pod object. It retries on conflict and fails the test on
+// any other error. name is the pod name; updateFn is the function that mutates the
+// pod object.
+func (f *Framework) UpdatePod(name string, updateFn func(pod *api.Pod)) {
+	ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
+		pod, err := f.PodClient().Get(name)
+		if err != nil {
+			return false, fmt.Errorf("failed to get pod %q: %v", name, err)
+		}
+		updateFn(pod)
+		_, err = f.PodClient().Update(pod)
+		if err == nil {
+			Logf("Successfully updated pod %q", name)
+			return true, nil
+		}
+		if errors.IsConflict(err) {
+			Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
+			return false, nil
+		}
+		return false, fmt.Errorf("failed to update pod %q: %v", name, err)
+	}))
+}
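UpdatePod drives the retry through wait.Poll, whose condition function returns (done bool, err error): false with a nil error means "not yet, poll again", true stops the loop successfully, and any non-nil error aborts immediately. A local re-implementation of that contract for illustration (assumed semantics; the real helper lives in k8s.io/kubernetes/pkg/util/wait):

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll retries condition every interval until it returns true, returns an
// error, or the timeout elapses, mirroring how UpdatePod above bounds its retries.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err // a real error aborts the loop immediately
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	// Succeed on the third attempt, like an update that hits two conflicts.
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	fmt.Println(err, attempts) // <nil> 3
}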
@@ -27,7 +27,6 @@ import (
 	"golang.org/x/net/websocket"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/kubelet"
@@ -462,29 +461,11 @@ var _ = framework.KubeDescribe("Pods", func() {
 		pods, err := podClient.List(options)
 		Expect(len(pods.Items)).To(Equal(1))
 
-		// Standard get, update retry loop
-		framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
-			By("updating the pod")
+		By("updating the pod")
+		f.UpdatePod(name, func(pod *api.Pod) {
 			value = strconv.Itoa(time.Now().Nanosecond())
-			if pod == nil { // on retries we need to re-get
-				pod, err = podClient.Get(name)
-				if err != nil {
-					return false, fmt.Errorf("failed to get pod: %v", err)
-				}
-			}
 			pod.Labels["time"] = value
-			pod, err = podClient.Update(pod)
-			if err == nil {
-				framework.Logf("Successfully updated pod")
-				return true, nil
-			}
-			if errors.IsConflict(err) {
-				framework.Logf("Conflicting update to pod, re-get and re-update: %v", err)
-				pod = nil // re-get it when we retry
-				return false, nil
-			}
-			return false, fmt.Errorf("failed to update pod: %v", err)
-		}))
+		})
 
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
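One subtlety of the new helper: updateFn can run more than once, since each conflict triggers a fresh Get followed by another call to updateFn. Mutation closures therefore need to be safe to re-apply to a freshly fetched object, which is why the closure above recomputes value on every invocation instead of capturing a stale copy. A small sketch of the distinction (toy types, not test code):

package main

import "fmt"

type Pod struct{ Labels map[string]string }

func main() {
	// Safe: re-applying sets the same key on whatever fresh copy it receives.
	setLabel := func(p *Pod) { p.Labels["time"] = "12345" }

	// Watch out: state accumulated outside the closure advances on every retry,
	// so the final value depends on how many conflicts occurred.
	calls := 0
	countingLabel := func(p *Pod) {
		calls++
		p.Labels["attempt"] = fmt.Sprint(calls)
	}

	pod := &Pod{Labels: map[string]string{}}
	for retry := 0; retry < 3; retry++ { // simulate conflict retries
		setLabel(pod)
		countingLabel(pod)
	}
	fmt.Println(pod.Labels) // map[attempt:3 time:12345]
}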
@@ -548,30 +529,11 @@ var _ = framework.KubeDescribe("Pods", func() {
 		pods, err := podClient.List(options)
 		Expect(len(pods.Items)).To(Equal(1))
 
-		// Standard get, update retry loop
-		framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
-			By("updating the pod")
-			value = strconv.Itoa(time.Now().Nanosecond())
-			if pod == nil { // on retries we need to re-get
-				pod, err = podClient.Get(name)
-				if err != nil {
-					return false, fmt.Errorf("failed to get pod: %v", err)
-				}
-			}
+		By("updating the pod")
+		f.UpdatePod(name, func(pod *api.Pod) {
 			newDeadline := int64(5)
 			pod.Spec.ActiveDeadlineSeconds = &newDeadline
-			pod, err = podClient.Update(pod)
-			if err == nil {
-				framework.Logf("Successfully updated pod")
-				return true, nil
-			}
-			if errors.IsConflict(err) {
-				framework.Logf("Conflicting update to pod, re-get and re-update: %v", err)
-				pod = nil // re-get it when we retry
-				return false, nil
-			}
-			return false, fmt.Errorf("failed to update pod: %v", err)
-		}))
+		})
 
 		framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
 	})
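The newDeadline temporary in the hunk above exists because ActiveDeadlineSeconds is a *int64 (optional fields in the API use pointers so that "unset" is distinguishable from an explicit zero) and Go cannot take the address of a literal directly. The idiom in isolation, with a struct that mimics the relevant corner of api.PodSpec:

package main

import "fmt"

// Spec mimics the relevant corner of api.PodSpec: optional numeric fields
// are pointers, so nil means "unset" rather than an explicit zero.
type Spec struct {
	ActiveDeadlineSeconds *int64
}

func main() {
	var spec Spec
	fmt.Println(spec.ActiveDeadlineSeconds == nil) // true: deadline not set

	newDeadline := int64(5) // &int64(5) is not legal Go, hence the temporary
	spec.ActiveDeadlineSeconds = &newDeadline
	fmt.Println(*spec.ActiveDeadlineSeconds) // 5
}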
@@ -1347,15 +1309,10 @@ var _ = framework.KubeDescribe("Pods", func() {
 		delay1, delay2 := startPodAndGetBackOffs(f, pod, podName, containerName, buildBackOffDuration)
 
 		By("updating the image")
-		pod, err := podClient.Get(pod.Name)
-		if err != nil {
-			framework.Failf("failed to get pod: %v", err)
-		}
-		pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.7"
-		pod, err = podClient.Update(pod)
-		if err != nil {
-			framework.Failf("error updating pod=%s/%s %v", podName, containerName, err)
-		}
+		f.UpdatePod(podName, func(pod *api.Pod) {
+			pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.7"
+		})
 
 		time.Sleep(syncLoopFrequency)
 		framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
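The surrounding test measures restart delays (delay1, delay2) around an image update, probing the kubelet's crash-loop back-off. A sketch of the general shape being exercised, capped exponential back-off, with parameters assumed for illustration rather than taken from kubelet code:

package main

import (
	"fmt"
	"time"
)

// backoffs returns the first n restart delays of a capped exponential
// back-off: each crash doubles the delay until it reaches the limit.
func backoffs(initial, limit time.Duration, n int) []time.Duration {
	out := make([]time.Duration, 0, n)
	d := initial
	for i := 0; i < n; i++ {
		out = append(out, d)
		d *= 2
		if d > limit {
			d = limit
		}
	}
	return out
}

func main() {
	fmt.Println(backoffs(10*time.Second, 5*time.Minute, 6))
	// [10s 20s 40s 1m20s 2m40s 5m0s]
}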