mirror of https://github.com/k3s-io/k3s

Refactor subpath reconstruction tests to use util test

parent a5c3c8d16c
commit 7aca917ea4
@@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
 		func(t disruptiveTest) {
 			It(t.testItStmt, func() {
 				By("Executing Spec")
-				t.runTest(c, f, clientPod, pvc)
+				t.runTest(c, f, clientPod)
 			})
 		}(test)
 	}
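Both disruptive suites register their table entries through an immediately-invoked closure so that each `It` captures its own copy of the loop variable, which is why this hunk and the NFS hunk below are identical. A minimal, self-contained sketch of the registration pattern (test names and bodies are invented for illustration; the simplified `testBody` shows only the parameters the bodies actually use, mirroring why the unused `pvc` argument could be dropped):

package main

import "fmt"

// testBody mirrors the refactor: pass only what the test bodies use.
type testBody func(clientPod string)

type disruptiveTest struct {
	testItStmt string
	runTest    testBody
}

func main() {
	tests := []disruptiveTest{
		{"should keep the volume mounted", func(p string) { fmt.Println("mount check for", p) }},
		{"should unmount after delete", func(p string) { fmt.Println("unmount check for", p) }},
	}
	for _, test := range tests {
		// Immediately-invoked closure: t is a per-iteration copy, so the
		// registered body does not observe a mutated loop variable
		// (necessary before Go 1.22's per-iteration loop scoping).
		func(t disruptiveTest) {
			fmt.Printf("It(%q)\n", t.testItStmt)
			t.runTest("clientPod")
		}(test)
	}
}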
@@ -31,7 +31,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )

-type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim)
+type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
 type disruptiveTest struct {
 	testItStmt string
 	runTest    testBody
@@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		func(t disruptiveTest) {
 			It(t.testItStmt, func() {
 				By("Executing Spec")
-				t.runTest(c, f, clientPod, pvc)
+				t.runTest(c, f, clientPod)
 			})
 		}(test)
 	}
@@ -23,7 +23,6 @@ import (

 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -553,6 +552,11 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 	pod.Spec.Containers[1].Image = "busybox"
 	pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}

+	// If grace period is too short, then there is not enough time for the volume
+	// manager to cleanup the volumes
+	gracePeriod := int64(30)
+	pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
+
 	By(fmt.Sprintf("Creating pod %s", pod.Name))
 	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 	Expect(err).ToNot(HaveOccurred(), "while creating pod")
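One note on the lines added above: `TerminationGracePeriodSeconds` is an optional field in the Kubernetes API, typed `*int64`, so the value has to be bound to a variable before its address can be taken; the test pins it at 30 seconds so the kubelet's volume manager has time to tear the volumes down. A minimal sketch of the idiom (the bare pod here is constructed only for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	pod := &v1.Pod{}
	// Optional API fields are pointers; a literal has no address,
	// so bind the value first and then take its address.
	gracePeriod := int64(30)
	pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
	fmt.Println(*pod.Spec.TerminationGracePeriodSeconds) // prints 30
}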
@@ -563,64 +567,7 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
 	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
 	Expect(err).ToNot(HaveOccurred(), "while getting pod")

-	nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod)
-	Expect(err).NotTo(HaveOccurred(), "while getting node IP")
-	nodeIP = nodeIP + ":22"
-
-	By("Expecting the volume mount to be found.")
-	result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
-	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
-	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
-
-	By("Expecting the subpath volume mount to be found.")
-	result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
-	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
-	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
-
-	By("Stopping the kubelet.")
-	utils.KubeletCommand(utils.KStop, f.ClientSet, pod)
-	defer func() {
-		if err != nil {
-			utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
-		}
-	}()
-
-	By(fmt.Sprintf("Deleting Pod %q", pod.Name))
-	if forceDelete {
-		err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
-	} else {
-		err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
-	}
-	Expect(err).NotTo(HaveOccurred())
-
-	By("Starting the kubelet and waiting for pod to delete.")
-	utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
-	err = f.WaitForPodTerminated(pod.Name, "")
-	if !apierrs.IsNotFound(err) && err != nil {
-		Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
-	}
-
-	if forceDelete {
-		// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
-		// so wait some time to finish
-		time.Sleep(30 * time.Second)
-	}
-
-	By("Expecting the volume mount not to be found.")
-	result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
-	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
-	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
-	framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName)
-
-	By("Expecting the subpath volume mount not to be found.")
-	result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
-	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
-	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
-	framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName)
+	utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
 }

 func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
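The roughly sixty removed lines above are exactly the checks that moved into utils.TestVolumeUnmountsFromDeletedPodWithForceOption (shown below), with the trailing `true` enabling the subpath assertions. The two grep pipelines do the real work: the base-volume check filters out `volume-subpaths` entries so a lingering subpath bind mount cannot satisfy it, and the subpath check looks only for those entries in /proc/self/mountinfo. A self-contained sketch of that filtering, with invented mount paths standing in for real node state (the actual test runs the pipelines over SSH on the node):

package main

import (
	"fmt"
	"strings"
)

// Invented examples of what a node's mount listing might contain for one pod.
var mounts = []string{
	"/var/lib/kubelet/pods/1234-uid/volumes/kubernetes.io~nfs/pv1",
	"/var/lib/kubelet/pods/1234-uid/volume-subpaths/pv1/test-container/0",
}

// baseVolumeMounts mirrors: mount | grep <uid> | grep -v volume-subpaths
func baseVolumeMounts(uid string) (out []string) {
	for _, m := range mounts {
		if strings.Contains(m, uid) && !strings.Contains(m, "volume-subpaths") {
			out = append(out, m)
		}
	}
	return
}

// subpathMounts mirrors: cat /proc/self/mountinfo | grep volume-subpaths | grep <uid>
func subpathMounts(uid string) (out []string) {
	for _, m := range mounts {
		if strings.Contains(m, uid) && strings.Contains(m, "volume-subpaths") {
			out = append(out, m)
		}
	}
	return
}

func main() {
	fmt.Println("base volume:", baseVolumeMounts("1234-uid"))
	fmt.Println("subpath:    ", subpathMounts("1234-uid"))
}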
@@ -138,7 +138,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
 }

 // TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
-func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
+func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
 	By("Writing to the volume.")
 	file := "/mnt/_SUCCESS"
 	out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
@@ -156,18 +156,26 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
 }

 // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
-// forceDelete is true indicating whether the pod is forcelly deleted.
-func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, forceDelete bool) {
+// forceDelete is true indicating whether the pod is forcefully deleted.
+func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
 	nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
 	Expect(err).NotTo(HaveOccurred())
 	nodeIP = nodeIP + ":22"

 	By("Expecting the volume mount to be found.")
-	result, err := framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
+	result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
 	framework.LogSSHResult(result)
 	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
 	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

+	if checkSubpath {
+		By("Expecting the volume subpath mount to be found.")
+		result, err := framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
+		framework.LogSSHResult(result)
+		Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+		Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+	}
+
 	By("Stopping the kubelet.")
 	KubeletCommand(KStop, c, clientPod)
 	defer func() {
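The `defer` that begins at the end of this hunk is a safety valve: the kubelet was just stopped, and if any later assertion fails before the test restarts it, the deferred guard brings it back so the node is not left broken for subsequent tests. A stripped-down sketch of the pattern (stopKubelet/startKubelet stand in for utils.KubeletCommand with KStop/KStart; the real helper drives the service over SSH):

package main

import (
	"errors"
	"fmt"
)

func stopKubelet()  { fmt.Println("kubelet stopped") }
func startKubelet() { fmt.Println("kubelet started") }

func run(fail bool) (err error) {
	stopKubelet()
	// Deferred guard: only restart here if the body bailed out early;
	// on the happy path the body restarts the kubelet itself.
	defer func() {
		if err != nil {
			startKubelet()
		}
	}()
	if fail {
		return errors.New("assertion failed while kubelet was down")
	}
	startKubelet()
	return nil
}

func main() {
	fmt.Println("happy path:", run(false))
	fmt.Println("failure:   ", run(true))
}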
@@ -182,8 +190,10 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
 	}
 	Expect(err).NotTo(HaveOccurred())

+	// Wait for pod to enter "Terminating state"
+	time.Sleep(30 * time.Second)
+
 	By("Starting the kubelet and waiting for pod to delete.")
 	KubeletCommand(KStart, c, clientPod)
 	err = f.WaitForPodTerminated(clientPod.Name, "")
@@ -196,22 +206,32 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		// so wait some time to finish
 		time.Sleep(30 * time.Second)
 	}

 	By("Expecting the volume mount not to be found.")
-	result, err = framework.SSH(fmt.Sprintf("mount | grep %s", clientPod.UID), nodeIP, framework.TestContext.Provider)
+	result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
 	framework.LogSSHResult(result)
 	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
 	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
 	framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)

+	if checkSubpath {
+		By("Expecting the volume subpath mount not to be found.")
+		result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
+		framework.LogSSHResult(result)
+		Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+		Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
+		framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
+	}
 }

 // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
-func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
-	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, false)
+func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
+	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
 }

-// TestVolumeUnmountsFromFoceDeletedPod tests that a volume unmounts if the client pod was forcelly deleted while the kubelet was down.
-func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
-	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, true)
+// TestVolumeUnmountsFromFoceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
+func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
+	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
 }

 // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
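Taken together, the utils package hunks leave one parameterized helper plus two thin wrappers. A plain restatement of how each entry point maps onto the new flags, with the subpath caller included for completeness:

TestVolumeUnmountsFromDeletedPod(c, f, clientPod)       -> ...WithForceOption(c, f, clientPod, false /*force*/, false /*subpath*/)
TestVolumeUnmountsFromForceDeletedPod(c, f, clientPod)  -> ...WithForceOption(c, f, clientPod, true /*force*/, false /*subpath*/)
testSubpathReconstruction (via utils)                   -> ...WithForceOption(f.ClientSet, f, pod, forceDelete, true /*subpath*/)

The vSphere call sites below simply drop the now-unused pvc argument.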
@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 			3. Verify that written file is accessible after kubelet restart
 		*/
 		It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
-			utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc)
+			utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
 		})

 		/*
@@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 			5. Verify that volume mount not to be found.
 		*/
 		It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
-			utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc)
+			utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
 		})

 		/*