enhancements to the volume e2e tests:

in e2e/volumes.go: give time for pod cleanup and volume unmount to happen before the volume server exits;
skip the Cinder volume test when not running with the OpenStack provider

comment on why we pause before the containerized server is stopped in the volume e2e tests (the ordering is sketched below); fixes #24100

update the NFS server image to 0.6, per #22529

fix the persistent_volume e2e test: the cleanup no longer expects a client pod; delete the PV after the test

Signed-off-by: Huamin Chen <hchen@redhat.com>
Huamin Chen 2016-04-11 15:12:17 +00:00
parent 75d9b36a2a
commit ee9ed4dd7f
2 changed files with 39 additions and 10 deletions
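
The heart of the #24100 fix is the teardown ordering in volumeTestCleanup: delete the client pod, wait until it has actually terminated, pause so the kubelet can finish unmounting, and only then delete the volume server pod. Below is a minimal sketch of that ordering, assuming the 1.2-era unversioned client and the e2e framework helpers that appear in the diff; the function name stopServerAfterClient is hypothetical, and VolumeTestConfig is the type already defined in the e2e package. The diff itself follows.

package e2e

import (
	"time"

	"github.com/golang/glog"
	apierrs "k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/test/e2e/framework"
)

// stopServerAfterClient (hypothetical name) tears down a volume test in the
// order that avoids umount errors: client pod first, then the server pod.
func stopServerAfterClient(f *framework.Framework, config VolumeTestConfig) {
	podClient := f.Client.Pods(config.namespace)

	// Delete the client pod so it releases (unmounts) the volume.
	if err := podClient.Delete(config.prefix+"-client", nil); err != nil {
		glog.Warningf("Failed to delete client pod: %v", err)
	}

	// Wait until the client pod is really gone; tolerate NotFound, which
	// just means the pod has already been removed.
	if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
		framework.ExpectNoError(err, "Failed to wait for client pod to terminate: %v", err)
	}

	// Give the kubelet a moment to finish the unmount before the server pod,
	// and with it the exported volume, disappears (see #24100).
	time.Sleep(20 * time.Second)

	if err := podClient.Delete(config.prefix+"-server", nil); err != nil {
		glog.Warningf("Failed to delete server pod: %v", err)
	}
}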


@@ -29,6 +29,20 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
+ // Clean both server and client pods.
+ func persistentVolumeTestCleanup(client *client.Client, config VolumeTestConfig) {
+ defer GinkgoRecover()
+ podClient := client.Pods(config.namespace)
+ if config.serverImage != "" {
+ err := podClient.Delete(config.prefix+"-server", nil)
+ if err != nil {
+ framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
+ }
+ }
+ }
// This test needs privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = framework.KubeDescribe("PersistentVolumes [Feature:Volumes]", func() {
@@ -45,12 +59,12 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:Volumes]", func() {
config := VolumeTestConfig{
namespace: ns,
prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.4",
serverImage: "gcr.io/google_containers/volume-nfs:0.6",
serverPorts: []int{2049},
}
defer func() {
- volumeTestCleanup(c, config)
+ persistentVolumeTestCleanup(c, config)
}()
pod := startVolumeServer(c, config)
@@ -97,6 +111,10 @@ var _ = framework.KubeDescribe("PersistentVolumes [Feature:Volumes]", func() {
framework.ExpectNoError(err, "Failed to create checker pod: %v", err)
err = framework.WaitForPodSuccessInNamespace(c, checkpod.Name, checkpod.Spec.Containers[0].Name, checkpod.Namespace)
Expect(err).NotTo(HaveOccurred())
+ // Must delete the PV here, otherwise it stays Available and the next PVC may bind to it, preventing the new PV from binding.
+ framework.Logf("Deleting PersistentVolume")
+ err = c.PersistentVolumes().Delete(pv.Name)
+ Expect(err).NotTo(HaveOccurred())
})
})
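
The PersistentVolume delete added above matters because a PV that survives the test stays Available, and a later PVC may bind to it instead of the PV the next test creates. A small sketch of deleting a PV and confirming it is gone, assuming the same unversioned client; the polling step and the helper name deletePVAndWait are illustrative additions, not part of this commit. The changes to e2e/volumes.go, named in the commit message, follow.

package e2e

import (
	"time"

	apierrs "k8s.io/kubernetes/pkg/api/errors"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/util/wait"
)

// deletePVAndWait (hypothetical helper) deletes a PV and polls until the API
// server no longer knows about it, so a following test starts from a clean slate.
func deletePVAndWait(c *client.Client, pvName string) error {
	if err := c.PersistentVolumes().Delete(pvName); err != nil {
		return err
	}
	return wait.Poll(2*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.PersistentVolumes().Get(pvName)
		if apierrs.IsNotFound(err) {
			return true, nil // the PV is gone
		}
		return false, err // keep polling while it exists; abort on other errors
	})
}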


@@ -47,6 +47,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
@@ -155,11 +156,12 @@ func startVolumeServer(client *client.Client, config VolumeTestConfig) *api.Pod
}
// Clean both server and client pods.
- func volumeTestCleanup(client *client.Client, config VolumeTestConfig) {
+ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.prefix))
defer GinkgoRecover()
+ client := f.Client
podClient := client.Pods(config.namespace)
err := podClient.Delete(config.prefix+"-client", nil)
@@ -171,6 +173,14 @@ func volumeTestCleanup(client *client.Client, config VolumeTestConfig) {
}
if config.serverImage != "" {
+ if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
+ framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
+ }
+ // See issue #24100.
+ // Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
+ By("sleeping a bit so client can stop and unmount")
+ time.Sleep(20 * time.Second)
err = podClient.Delete(config.prefix+"-server", nil)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
@@ -356,13 +366,13 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.4",
serverImage: "gcr.io/google_containers/volume-nfs:0.6",
serverPorts: []int{2049},
}
defer func() {
if clean {
- volumeTestCleanup(c, config)
+ volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(c, config)
@@ -396,7 +406,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
defer func() {
if clean {
- volumeTestCleanup(c, config)
+ volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(c, config)
@@ -479,7 +489,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
defer func() {
if clean {
- volumeTestCleanup(c, config)
+ volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(c, config)
@@ -522,7 +532,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
defer func() {
if clean {
- volumeTestCleanup(c, config)
+ volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(c, config)
@@ -590,7 +600,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
defer func() {
if clean {
- volumeTestCleanup(c, config)
+ volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(c, config)
@@ -652,6 +662,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.KubeDescribe("Cinder", func() {
It("should be mountable", func() {
+ framework.SkipUnlessProviderIs("openstack")
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "cinder",
@@ -694,7 +705,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
defer func() {
if clean {
framework.Logf("Running volumeTestCleanup")
- volumeTestCleanup(c, config)
+ volumeTestCleanup(f, config)
}
}()
volume := api.VolumeSource{
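
For the Cinder case, the gate added at the top of the It block is the usual e2e pattern for provider-specific tests: the spec is skipped, not failed, when the run does not target OpenStack. A sketch of where the gate sits, assuming a run started with a provider flag such as --provider=openstack; the Describe string below is only a placeholder.

package e2e

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Cinder (provider-gate sketch)", func() {
	It("should be mountable", func() {
		// Skip (rather than fail) the spec unless the e2e run targets OpenStack.
		framework.SkipUnlessProviderIs("openstack")

		// ... Cinder volume setup, server/client pods, and mount checks go here ...
	})
})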