Merge pull request #61373 from msau42/subpath-reconstruct

Automatic merge from submit-queue (batch tested with PRs 61453, 61393, 61379, 61373, 61494). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Use inner volume name instead of outer volume name for subpath directory

**What this PR does / why we need it**:
Fixes volume reconstruction for PVCs with subpath. The subpath bind-mount directory is now keyed by the inner volume name (the PV name) rather than the outer volume spec name, so the kubelet can match and clean up these mounts after a restart.
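
For orientation, here is a minimal, hypothetical Go sketch (none of these names are kubelet API) of the outer/inner naming distinction the fix relies on:

```go
package main

import "fmt"

// volumeNames illustrates the two names the kubelet tracks for a pod volume:
// the name used in the pod spec ("outer") and the name of the volume that is
// actually mounted ("inner", the bound PV name for PVC-backed volumes).
// This struct is a made-up stand-in, not a kubelet type.
type volumeNames struct {
	OuterVolumeSpecName string // name under pod.spec.volumes[]
	InnerVolumeSpecName string // bound PV name for a PVC, otherwise same as the outer name
}

func main() {
	vol := volumeNames{
		OuterVolumeSpecName: "my-volume",    // from the pod spec
		InnerVolumeSpecName: "pvc-a3f1c2d4", // bound PV name (made up)
	}

	// Before this PR the subpath directory was keyed by the outer name; with
	// this change both setup and cleanup key it by the inner name, which is
	// the name that can be recovered from the mount path after a kubelet restart.
	fmt.Println("old subpath dir component:", vol.OuterVolumeSpecName)
	fmt.Println("new subpath dir component:", vol.InnerVolumeSpecName)
}
```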

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #61372

**Special notes for your reviewer**:


**Release note**:

```release-note
ACTION REQUIRED: In-place node upgrades to this release from versions 1.7.14, 1.8.9, and 1.9.4 are not supported if using subpath volumes with PVCs.  Such pods should be drained from the node first.
```
Kubernetes Submit Queue, committed via GitHub on 2018-03-22 06:20:28 -07:00, commit d23066e8b3
6 changed files with 103 additions and 7 deletions


@@ -470,6 +470,9 @@ type VolumeInfo struct {
// Whether the volume permission is set to read-only or not
// This value is passed from volume.spec
ReadOnly bool
// Inner volume spec name, which is the PV name if used, otherwise
// it is the same as the outer volume spec name.
InnerVolumeSpecName string
}
type VolumeMap map[string]VolumeInfo
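
As a rough sketch of the rule described in that comment (a stand-in helper, not part of the kubelet; the real value is populated by the volume manager):

```go
// innerVolumeSpecName mirrors the comment on InnerVolumeSpecName: use the
// bound PV name when the pod volume is backed by a PVC, otherwise fall back
// to the outer (pod spec) volume name. Illustrative only; the real field is
// populated by the kubelet's volume manager.
func innerVolumeSpecName(outerName, boundPVName string, backedByPVC bool) string {
	if backedByPVC && boundPVName != "" {
		return boundPVName
	}
	return outerName
}
```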


@@ -235,7 +235,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{
VolumeMountIndex: i,
Path: hostPath,
- VolumeName: mount.Name,
+ VolumeName: vol.InnerVolumeSpecName,
VolumePath: volumePath,
PodDir: podDir,
ContainerName: container.Name,
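
The VolumeName passed to PrepareSafeSubpath ends up as a path component of the per-pod subpath bind-mount target, which is why switching it to vol.InnerVolumeSpecName matters. A hedged sketch of that layout, consistent with the /var/lib/kubelet/pods/&lt;uid&gt;/volume-subpaths/&lt;volume&gt;/* comment in the cleanup code below (the helper name and example values are made up):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

// subpathTargetPath sketches where a container's subPath is bind-mounted:
// <podDir>/volume-subpaths/<volumeName>/<containerName>/<volumeMountIndex>.
// With this PR, volumeName is the inner (PV) name for PVC volumes rather
// than the name from the pod spec.
func subpathTargetPath(podDir, volumeName, containerName string, volumeMountIndex int) string {
	return filepath.Join(podDir, "volume-subpaths", volumeName, containerName, strconv.Itoa(volumeMountIndex))
}

func main() {
	fmt.Println(subpathTargetPath(
		"/var/lib/kubelet/pods/11111111-2222-3333-4444-555555555555", // pod directory (made-up UID)
		"pvc-a3f1c2d4", // inner volume name: the bound PV
		"test-container",
		0,
	))
}
```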


@@ -255,9 +255,10 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
podVolumes := make(container.VolumeMap)
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{
- Mounter: mountedVolume.Mounter,
- BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
- ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
+ Mounter: mountedVolume.Mounter,
+ BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
+ ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
+ InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName,
}
}
return podVolumes
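
The map above stays keyed by OuterVolumeSpecName, which is what a container's volumeMounts refer to, while the new field carries the inner name that subpath setup and cleanup need. A simplified usage sketch (these types only loosely mirror the real container.VolumeInfo):

```go
// VolumeInfo and VolumeMap here are simplified stand-ins for the kubelet
// types shown earlier; only the fields relevant to this change are kept.
type VolumeInfo struct {
	ReadOnly            bool
	InnerVolumeSpecName string
}

type VolumeMap map[string]VolumeInfo

// innerNameForMount resolves a volumeMount's name (the outer name) to the
// inner volume name that subpath setup and cleanup should use.
func innerNameForMount(podVolumes VolumeMap, mountName string) (string, bool) {
	vol, ok := podVolumes[mountName] // map is keyed by the outer (pod spec) name
	if !ok {
		return "", false
	}
	return vol.InnerVolumeSpecName, true
}
```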


@@ -815,9 +815,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
// This implementation is shared between Linux and NsEnterMounter
func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error {
- glog.V(4).Infof("Cleaning up subpath mounts for %s", podDir)
+ // scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/*
subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName)
+ glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir)
containerDirs, err := ioutil.ReadDir(subPathDir)
if err != nil {
if os.IsNotExist(err) {
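
For context, a reduced sketch of the cleanup walk over that directory (unmounting and most error handling omitted; this is illustrative, not the actual doCleanSubPaths). Because the directory is now named after the inner volume name, the unmount path must pass that same name, which is what the operation generator change below does:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// cleanSubPathsSketch walks <podDir>/volume-subpaths/<volumeName>/<container>/<index>
// and reports each leaf it would unmount and remove. This is an illustrative
// reduction of doCleanSubPaths, not the actual implementation.
func cleanSubPathsSketch(podDir, volumeName string) error {
	subPathDir := filepath.Join(podDir, "volume-subpaths", volumeName)
	containerDirs, err := ioutil.ReadDir(subPathDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // nothing was ever mounted for this volume
		}
		return err
	}
	for _, containerDir := range containerDirs {
		if !containerDir.IsDir() {
			continue
		}
		// Each entry under a container directory is one bind-mounted subPath;
		// the real code unmounts it before removing the directory tree.
		fmt.Printf("would clean up %s\n", filepath.Join(subPathDir, containerDir.Name()))
	}
	return nil
}

func main() {
	_ = cleanSubPathsSketch("/var/lib/kubelet/pods/example-uid", "pvc-a3f1c2d4")
}
```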


@@ -672,7 +672,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
// Remove all bind-mounts for subPaths
podDir := path.Join(podsDir, string(volumeToUnmount.PodUID))
- if err := mounter.CleanSubPaths(podDir, volumeToUnmount.OuterVolumeSpecName); err != nil {
+ if err := mounter.CleanSubPaths(podDir, volumeToUnmount.InnerVolumeSpecName); err != nil {
return volumeToUnmount.GenerateError("error cleaning subPath mounts", err)
}


@@ -23,6 +23,7 @@ import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -60,7 +61,7 @@ var initVolSources = map[string]func() volSource{
"hostPath": initHostpath,
"hostPathSymlink": initHostpathSymlink,
"emptyDir": initEmptydir,
"gcePD": initGCEPD,
"gcePDPVC": initGCEPD,
"gcePDPartitioned": initGCEPDPartition,
"nfs": initNFS,
"nfsPVC": initNFSPVC,
@@ -307,6 +308,17 @@ var _ = utils.SIGDescribe("Subpath", func() {
testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(f, pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if curVolType == "hostPath" || curVolType == "hostPathSymlink" {
framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType)
}
testSubpathReconstruction(f, pod, true)
})
})
}
@@ -549,6 +561,85 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod, fileInVolume,
Expect(strings.TrimSpace(out)).To(BeEquivalentTo("test-after"))
}
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
// Change to busybox
pod.Spec.Containers[0].Image = "busybox"
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = "busybox"
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred())
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
Expect(err).ToNot(HaveOccurred())
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
By("Expecting the subpath volume mount to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
By("Stopping the kubelet.")
utils.KubeletCommand(utils.KStop, f.ClientSet, pod)
defer func() {
if err != nil {
utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
}
}()
By(fmt.Sprintf("Deleting Pod %q", pod.Name))
if forceDelete {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
} else {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
}
Expect(err).NotTo(HaveOccurred())
By("Starting the kubelet and waiting for pod to delete.")
utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
err = f.WaitForPodTerminated(pod.Name, "")
if !apierrs.IsNotFound(err) && err != nil {
Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
}
if forceDelete {
// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
// so wait some time to finish
time.Sleep(30 * time.Second)
}
By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName)
By("Expecting the subpath volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName)
}
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
}