mirror of https://github.com/k3s-io/k3s
Update tests to prepare for graceful deletion

For cases where we want to immediately clean up the pod, start using gracePeriod 0 in test cases.

pull/6/head
parent 3fce3433d9
commit 9267f829eb
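
Every hunk below makes the same substitution: Go tests that deleted pods with a nil *api.DeleteOptions (or no options argument at all) now pass api.NewDeleteOptions(0), and the kubectl delete/stop invocations gain --grace-period=0, so test pods are removed immediately instead of after the default grace period. A minimal sketch of the shape involved, assuming (not quoting) the pkg/api of this era:

    // Hypothetical, trimmed reconstruction of pkg/api; the real
    // DeleteOptions carries more fields (TypeMeta and friends).
    package api

    type DeleteOptions struct {
        // nil means "use the server-side default grace period";
        // a pointer to 0 means "delete immediately".
        GracePeriodSeconds *int64 `json:"gracePeriodSeconds"`
    }

    // NewDeleteOptions wraps a grace period in seconds, so a test can
    // write api.NewDeleteOptions(0) instead of building the struct by hand.
    func NewDeleteOptions(grace int64) *DeleteOptions {
        return &DeleteOptions{GracePeriodSeconds: &grace}
    }

With a zero grace period the pod is reaped at once, which matters here because these tests assert immediately afterwards that nothing is left running.
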
@@ -867,7 +867,7 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
     // Delete a pod to free up room.
     glog.Infof("Deleting pod %v", bar.Name)
-    err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
+    err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
     if err != nil {
         glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
     }
@@ -878,8 +878,12 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
         glog.Fatalf("Failed to create pod: %v, %v", pod, err)
     }
     if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
-        glog.Fatalf("FAILED: Pod never started running: %v", err)
+        if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
+            glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v", pod)
+        } else {
+            glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
+        }
     }

     glog.Info("Scheduler doesn't make phantom pods: test passed.")
 }

@@ -23,7 +23,7 @@ readonly red=$(tput setaf 1)
 readonly green=$(tput setaf 2)

 kube::test::clear_all() {
-  kubectl delete "${kube_flags[@]}" rc,pods --all
+  kubectl delete "${kube_flags[@]}" rc,pods --all --grace-period=0
 }

 kube::test::get_object_assert() {
@@ -246,7 +246,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete pod valid-pod "${kube_flags[@]}"
+  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -262,7 +262,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
+  kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -278,7 +278,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
   # Command
-  kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}"
+  kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''

@@ -310,7 +310,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete --all pods "${kube_flags[@]}" # --all remove all the pods
+  kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all remove all the pods
   # Post-condition: no POD is running
   kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''

@@ -327,7 +327,7 @@ runTests() {
   # Pre-condition: valid-pod and redis-proxy PODs are running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
   # Command
-  kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" # delete multiple pods at once
+  kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -344,7 +344,7 @@ runTests() {
   # Pre-condition: valid-pod and redis-proxy PODs are running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
   # Command
-  kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" # stop multiple pods at once
+  kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # stop multiple pods at once
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -368,7 +368,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete pods -lnew-name=new-valid-pod "${kube_flags[@]}"
+  kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}"
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -419,7 +419,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete pods -l'name in (valid-pod-super-sayan)' "${kube_flags[@]}"
+  kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}"
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -455,7 +455,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod
+  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''

@@ -791,7 +791,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
         if strings.Contains(data.Image, jpgExpected) {
             return nil
         } else {
-            return errors.New(fmt.Sprintf("data served up in container is innaccurate, %s didn't contain %s", data, jpgExpected))
+            return errors.New(fmt.Sprintf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected))
         }
     }
 }

@@ -78,8 +78,8 @@ var _ = Describe("Pod Disks", func() {
         By("cleaning up PD-RW test environment")
         // Teardown pods, PD. Ignore errors.
         // Teardown should do nothing unless test failed.
-        podClient.Delete(host0Pod.Name, nil)
-        podClient.Delete(host1Pod.Name, nil)
+        podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+        podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
         detachPD(host0Name, diskName)
         detachPD(host1Name, diskName)
         deletePD(diskName)
@@ -98,7 +98,7 @@ var _ = Describe("Pod Disks", func() {
         Logf("Wrote value: %v", testFileContents)

         By("deleting host0Pod")
-        expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
+        expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")

         By("submitting host1Pod to kubernetes")
         _, err = podClient.Create(host1Pod)
@@ -113,7 +113,7 @@ var _ = Describe("Pod Disks", func() {
         Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))

         By("deleting host1Pod")
-        expectNoError(podClient.Delete(host1Pod.Name, nil), "Failed to delete host1Pod")
+        expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")

         By(fmt.Sprintf("deleting PD %q", diskName))
         deletePDWithRetry(diskName)
@@ -136,9 +136,9 @@ var _ = Describe("Pod Disks", func() {
         By("cleaning up PD-RO test environment")
         // Teardown pods, PD. Ignore errors.
         // Teardown should do nothing unless test failed.
-        podClient.Delete(rwPod.Name, nil)
-        podClient.Delete(host0ROPod.Name, nil)
-        podClient.Delete(host1ROPod.Name, nil)
+        podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
+        podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
+        podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))

         detachPD(host0Name, diskName)
         detachPD(host1Name, diskName)
@@ -149,7 +149,7 @@ var _ = Describe("Pod Disks", func() {
         _, err = podClient.Create(rwPod)
         expectNoError(err, "Failed to create rwPod")
         expectNoError(framework.WaitForPodRunning(rwPod.Name))
-        expectNoError(podClient.Delete(rwPod.Name, nil), "Failed to delete host0Pod")
+        expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
         expectNoError(waitForPDDetach(diskName, host0Name))

         By("submitting host0ROPod to kubernetes")
@@ -165,10 +165,10 @@ var _ = Describe("Pod Disks", func() {
         expectNoError(framework.WaitForPodRunning(host1ROPod.Name))

         By("deleting host0ROPod")
-        expectNoError(podClient.Delete(host0ROPod.Name, nil), "Failed to delete host0ROPod")
+        expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")

         By("deleting host1ROPod")
-        expectNoError(podClient.Delete(host1ROPod.Name, nil), "Failed to delete host1ROPod")
+        expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")

         By(fmt.Sprintf("deleting PD %q", diskName))
         deletePDWithRetry(diskName)

@@ -43,7 +43,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
     // At the end of the test, clean up by removing the pod.
     defer func() {
         By("deleting the pod")
-        c.Pods(ns).Delete(podDescr.Name, nil)
+        c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
     }()

     // Wait until the pod is not pending. (Here we need to check for something other than
@@ -86,15 +86,14 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
 func testHostIP(c *client.Client, ns string, pod *api.Pod) {
     podClient := c.Pods(ns)
     By("creating pod")
-    defer podClient.Delete(pod.Name, nil)
-    _, err := podClient.Create(pod)
-    if err != nil {
+    defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+    if _, err := podClient.Create(pod); err != nil {
         Failf("Failed to create pod: %v", err)
     }
     By("ensuring that pod is running and has a hostIP")
     // Wait for the pods to enter the running state. Waiting loops until the pods
     // are running so non-running pods cause a timeout for this test.
-    err = waitForPodRunningInNamespace(c, pod.Name, ns)
+    err := waitForPodRunningInNamespace(c, pod.Name, ns)
     Expect(err).NotTo(HaveOccurred())
     // Try to make sure we get a hostIP for each pod.
     hostIPTimeout := 2 * time.Minute
@@ -222,7 +221,7 @@ var _ = Describe("Pods", func() {
         // We call defer here in case there is a problem with
         // the test so we can ensure that we clean up after
         // ourselves
-        defer podClient.Delete(pod.Name, nil)
+        defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
         _, err = podClient.Create(pod)
         if err != nil {
             Failf("Failed to create pod: %v", err)
@@ -235,7 +234,7 @@ var _ = Describe("Pods", func() {
         }
         Expect(len(pods.Items)).To(Equal(1))

-        By("veryfying pod creation was observed")
+        By("verifying pod creation was observed")
         select {
         case event, _ := <-w.ResultChan():
             if event.Type != watch.Added {
@@ -312,7 +311,7 @@ var _ = Describe("Pods", func() {
         By("submitting the pod to kubernetes")
         defer func() {
             By("deleting the pod")
-            podClient.Delete(pod.Name, nil)
+            podClient.Delete(pod.Name, api.NewDeleteOptions(0))
         }()
         pod, err := podClient.Create(pod)
         if err != nil {
@@ -376,7 +375,7 @@ var _ = Describe("Pods", func() {
             },
         },
     }
-    defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, nil)
+    defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
     _, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod)
     if err != nil {
         Failf("Failed to create serverPod: %v", err)
@@ -600,7 +599,7 @@ var _ = Describe("Pods", func() {
            // We call defer here in case there is a problem with
            // the test so we can ensure that we clean up after
            // ourselves
-           podClient.Delete(pod.Name)
+           podClient.Delete(pod.Name, api.NewDeleteOptions(0))
        }()

        By("waiting for the pod to start running")
@@ -673,7 +672,7 @@ var _ = Describe("Pods", func() {
            // We call defer here in case there is a problem with
            // the test so we can ensure that we clean up after
            // ourselves
-           podClient.Delete(pod.Name)
+           podClient.Delete(pod.Name, api.NewDeleteOptions(0))
        }()

        By("waiting for the pod to start running")

@@ -831,20 +831,24 @@ func expectNoError(err error, explain ...interface{}) {
     ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
 }

-// Stops everything from filePath from namespace ns and checks if everything maching selectors from the given namespace is correctly stopped.
+// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
 func cleanup(filePath string, ns string, selectors ...string) {
-    By("using stop to clean up resources")
+    By("using delete to clean up resources")
     var nsArg string
     if ns != "" {
         nsArg = fmt.Sprintf("--namespace=%s", ns)
     }
-    runKubectl("stop", "-f", filePath, nsArg)
+    runKubectl("stop", "--grace-period=0", "-f", filePath, nsArg)

     for _, selector := range selectors {
-        resources := runKubectl("get", "pods,rc,svc", "-l", selector, "--no-headers", nsArg)
+        resources := runKubectl("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
         if resources != "" {
             Failf("Resources left running after stop:\n%s", resources)
         }
+        pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
+        if pods != "" {
+            Failf("Pods left unterminated after stop:\n%s", pods)
+        }
     }
 }

@@ -232,7 +232,7 @@ var deleteNow string = `
 {
     "kind": "DeleteOptions",
     "apiVersion": "` + testapi.Version() + `",
-    "gracePeriodSeconds": null%s
+    "gracePeriodSeconds": 0%s
 }
 `

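The deleteNow fixture above is JSON rather than Go, but it expresses the same request: for a pointer-valued field, null decodes to a nil pointer (defer to the server default) while 0 decodes to a pointer to zero (delete now); the trailing %s is a format placeholder the test presumably fills in. A self-contained illustration of that decoding split, using a stand-in type rather than the real api.DeleteOptions:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Stand-in for the fixture's shape; not the real api.DeleteOptions.
    type deleteOptions struct {
        GracePeriodSeconds *int64 `json:"gracePeriodSeconds"`
    }

    func main() {
        for _, doc := range []string{
            `{"gracePeriodSeconds": null}`, // old fixture: server default
            `{"gracePeriodSeconds": 0}`,    // new fixture: delete immediately
        } {
            var opts deleteOptions
            if err := json.Unmarshal([]byte(doc), &opts); err != nil {
                panic(err)
            }
            if opts.GracePeriodSeconds == nil {
                fmt.Println("grace period: server default")
            } else {
                fmt.Printf("grace period: %ds\n", *opts.GracePeriodSeconds)
            }
        }
    }
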
@@ -277,7 +277,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
         t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
     }

-    err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, nil)
+    err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, api.NewDeleteOptions(0))
     if err != nil {
         t.Errorf("Failed to delete pod: %v", err)
     }