diff --git a/test/e2e/lifecycle/cluster_upgrade.go b/test/e2e/lifecycle/cluster_upgrade.go
index c7744935a7..e6e3a7be19 100644
--- a/test/e2e/lifecycle/cluster_upgrade.go
+++ b/test/e2e/lifecycle/cluster_upgrade.go
@@ -58,6 +58,7 @@ var upgradeTests = []upgrades.Test{
 	&apps.DaemonSetUpgradeTest{},
 	&upgrades.IngressUpgradeTest{},
 	&upgrades.AppArmorUpgradeTest{},
+	&storage.VolumeModeDowngradeTest{},
 }
 
 var gpuUpgradeTests = []upgrades.Test{
diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go
index d016f79619..31e76a0fb4 100644
--- a/test/e2e/storage/testsuites/volumemode.go
+++ b/test/e2e/storage/testsuites/volumemode.go
@@ -305,10 +305,10 @@ func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Checking if persistent volume exists as expected volume mode")
-		checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
+		utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
 
 		By("Checking if read/write to persistent volume works properly")
-		checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
+		utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
 	})
 	// TODO(mkimuram): Add more tests
 }
@@ -370,10 +370,10 @@ func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Checking if persistent volume exists as expected volume mode")
-		checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
+		utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
 
 		By("Checking if read/write to persistent volume works properly")
-		checkReadWriteToPath(pod, input.volMode, "/mnt/volume1")
+		utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
 	})
 	// TODO(mkimuram): Add more tests
 }
@@ -405,45 +405,3 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1
 
 	return scConfig, pvConfig, pvcConfig
 }
-
-func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
-	if volMode == v1.PersistentVolumeBlock {
-		// Check if block exists
-		utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
-
-		// Double check that it's not directory
-		utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
-	} else {
-		// Check if directory exists
-		utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
-
-		// Double check that it's not block
-		utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
-	}
-}
-
-func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
-	if volMode == v1.PersistentVolumeBlock {
-		// random -> file1
-		utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
-		// file1 -> dev (write to dev)
-		utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
-		// dev -> file2 (read from dev)
-		utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
-		// file1 == file2 (check contents)
-		utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
-		// Clean up temp files
-		utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
-
-		// Check that writing file to block volume fails
-		utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
-	} else {
-		// text -> file1 (write to file)
-		utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
-		// grep file1 (read from file and check contents)
-		utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
-
-		// Check that writing to directory as block volume fails
-		utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
-	}
-}
diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go
index 7772132362..5c212ba329 100644
--- a/test/e2e/storage/utils/utils.go
+++ b/test/e2e/storage/utils/utils.go
@@ -441,3 +441,45 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
 
 	}
 }
+
+func CheckVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+	if volMode == v1.PersistentVolumeBlock {
+		// Check if block exists
+		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path))
+
+		// Double check that it's not directory
+		VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1)
+	} else {
+		// Check if directory exists
+		VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path))
+
+		// Double check that it's not block
+		VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1)
+	}
+}
+
+func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
+	if volMode == v1.PersistentVolumeBlock {
+		// random -> file1
+		VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
+		// file1 -> dev (write to dev)
+		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
+		// dev -> file2 (read from dev)
+		VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
+		// file1 == file2 (check contents)
+		VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2")
+		// Clean up temp files
+		VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2")
+
+		// Check that writing file to block volume fails
+		VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
+	} else {
+		// text -> file1 (write to file)
+		VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
+		// grep file1 (read from file and check contents)
+		VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path))
+
+		// Check that writing to directory as block volume fails
+		VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
+	}
+}
diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD
index ed5351c7a5..dfe4773f3c 100644
--- a/test/e2e/upgrades/storage/BUILD
+++ b/test/e2e/upgrades/storage/BUILD
@@ -7,12 +7,18 @@ load(
 
 go_library(
     name = "go_default_library",
-    srcs = ["persistent_volumes.go"],
+    srcs = [
+        "persistent_volumes.go",
+        "volume_mode.go",
+    ],
     importpath = "k8s.io/kubernetes/test/e2e/upgrades/storage",
     deps = [
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/storage/utils:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go
new file mode 100644
index 0000000000..1ef316bb99
--- /dev/null
+++ b/test/e2e/upgrades/storage/volume_mode.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/version"
+	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/storage/utils"
+	"k8s.io/kubernetes/test/e2e/upgrades"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+const devicePath = "/mnt/volume1"
+
+// VolumeModeDowngradeTest tests that a VolumeMode Block PV is not mistakenly
+// formatted and mounted like a nil/Filesystem PV after a downgrade to a version
+// where the BlockVolume feature is disabled
+type VolumeModeDowngradeTest struct {
+	pvSource *v1.PersistentVolumeSource
+	pv       *v1.PersistentVolume
+	pvc      *v1.PersistentVolumeClaim
+	pod      *v1.Pod
+}
+
+func (VolumeModeDowngradeTest) Name() string {
+	return "[sig-storage] volume-mode-downgrade"
+}
+
+func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
+	if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
+		return true
+	}
+
+	// Only run when downgrading from >= 1.13 to < 1.13
+	blockVersion := version.MustParseSemantic("1.13.0-alpha.0")
+	if upgCtx.Versions[0].Version.LessThan(blockVersion) {
+		return true
+	}
+	if !upgCtx.Versions[1].Version.LessThan(blockVersion) {
+		return true
+	}
+
+	return false
+}
+
+// Setup creates a block pv and then verifies that a pod can consume it. The pod writes data to the volume.
+func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
+
+	var err error
+
+	cs := f.ClientSet
+	ns := f.Namespace.Name
+
+	By("Creating a PVC")
+	block := v1.PersistentVolumeBlock
+	pvcConfig := framework.PersistentVolumeClaimConfig{
+		StorageClassName: nil,
+		VolumeMode:       &block,
+	}
+	t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
+	t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+	Expect(err).NotTo(HaveOccurred())
+
+	t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+
+	t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Consuming the PVC before downgrade")
+	t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Checking if PV exists as expected volume mode")
+	utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
+
+	By("Checking if read/write to PV works properly")
+	utils.CheckReadWriteToPath(t.pod, block, devicePath)
+}
+
+// Test waits for the downgrade to complete, and then verifies that a pod can no
+// longer consume the pv as it is not mapped nor mounted into the pod
+func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
+	By("Waiting for downgrade to finish")
+	<-done
+
+	By("Verifying that nothing exists at the device path in the pod")
+	utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
+}
+
+// Teardown cleans up any remaining resources.
+func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) {
+	By("Deleting the pod")
+	framework.ExpectNoError(framework.DeletePodWithWait(f, f.ClientSet, t.pod))
+
+	By("Deleting the PVC")
+	framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil))
+
+	By("Waiting for the PV to be deleted")
+	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute))
+}
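
Note on the version gating above: the Skip predicate only lets the downgrade test run when the cluster starts at or after 1.13.0-alpha.0 (where block volume mode is available behind the BlockVolume feature) and is being moved to a release below that version; any other from/to pair, or an unsupported provider, is skipped. Below is a minimal, self-contained sketch of just that version check, using the same apimachinery version package as the patch. The local upgradeContext type and the skipVolumeModeDowngrade helper are hypothetical stand-ins introduced only for illustration (the real type is upgrades.UpgradeContext, and the provider whitelist is omitted here).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

// upgradeContext is a simplified, hypothetical stand-in for
// upgrades.UpgradeContext: versions[0] is the version the cluster starts at,
// versions[1] the version it is upgraded or downgraded to.
type upgradeContext struct {
	versions []*version.Version
}

// skipVolumeModeDowngrade mirrors the version comparison in
// VolumeModeDowngradeTest.Skip: run only when going from a release with
// block volume support (>= 1.13.0-alpha.0) down to one without it.
func skipVolumeModeDowngrade(ctx upgradeContext) bool {
	blockVersion := version.MustParseSemantic("1.13.0-alpha.0")
	if ctx.versions[0].LessThan(blockVersion) {
		return true // starting release has no block support; nothing to verify
	}
	if !ctx.versions[1].LessThan(blockVersion) {
		return true // target release still supports block mode; not the downgrade of interest
	}
	return false
}

func main() {
	cases := [][2]string{
		{"1.13.0", "1.12.3"}, // runs: crosses the 1.13 boundary downward
		{"1.14.0", "1.13.0"}, // skipped: target still understands block mode
		{"1.12.5", "1.11.2"}, // skipped: block mode was never available
	}
	for _, c := range cases {
		ctx := upgradeContext{versions: []*version.Version{
			version.MustParseSemantic(c[0]),
			version.MustParseSemantic(c[1]),
		}}
		fmt.Printf("%s -> %s: skip=%v\n", c[0], c[1], skipVolumeModeDowngrade(ctx))
	}
}

Running the sketch prints skip=false only for the first pair, which is the downgrade scenario the new test targets.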