mirror of https://github.com/k3s-io/k3s
Merge pull request #68945 from gnufied/fix-mount-options

Make sure we pass mount options while creating bind mounts

commit ecfd1a3e56
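Every volume plugin touched below gains a mountOptions field populated from the volume Spec (via util.MountOptionFromSpec) and merges it with the plugin's own options (via util.JoinMountOptions) before performing the bind mount. The real util.JoinMountOptions lives in k8s.io/kubernetes/pkg/volume/util and is not shown in this diff; the sketch below is a hypothetical stand-in that illustrates the behavior the new tests imply — de-duplication while keeping first-seen order, so joining ["bind"] with ["_netdev"] yields ["bind", "_netdev"].

package main

import "fmt"

// joinMountOptions merges plugin options and user options, dropping
// duplicates and empty strings while preserving first-seen order.
// Illustrative sketch only; not the commit's implementation.
func joinMountOptions(userOptions, systemOptions []string) []string {
	seen := make(map[string]bool)
	var joined []string
	for _, opt := range append(append([]string{}, userOptions...), systemOptions...) {
		if opt == "" || seen[opt] {
			continue // skip empty and duplicate options
		}
		seen[opt] = true
		joined = append(joined, opt)
	}
	return joined
}

func main() {
	fmt.Println(joinMountOptions([]string{"bind"}, []string{"_netdev"}))  // [bind _netdev]
	fmt.Println(joinMountOptions([]string{"bind", "ro"}, []string{"ro"})) // [bind ro]
}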
--- a/pkg/util/mount/fake.go
+++ b/pkg/util/mount/fake.go
@@ -83,11 +83,8 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options
 				}
 			}
 		}
-		// find 'ro' option
-		if option == "ro" {
-			// reuse MountPoint.Opts field to mark mount as readonly
-			opts = append(opts, "ro")
-		}
+		// reuse MountPoint.Opts field to mark mount as readonly
+		opts = append(opts, option)
 	}
 
 	// If target is a symlink, get its absolute path
@@ -95,7 +92,6 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options
 	if err != nil {
 		absTarget = target
 	}
-
 	f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts})
 	glog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget)
 	f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype})
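With this change FakeMounter records every option passed to Mount in MountPoint.Opts; previously only "ro" was captured. That is what lets the new plugin tests below assert on the full option list. A hedged illustration of the resulting test-side behavior (the device and target paths here are made up for the example):

package example

import "k8s.io/kubernetes/pkg/util/mount"

// recordedOpts shows what a test can now observe: after this commit,
// Opts comes back as ["bind", "_netdev"]; before it, non-"ro" options
// were silently dropped.
func recordedOpts() []string {
	fake := &mount.FakeMounter{}
	_ = fake.Mount("/dev/xvdba", "/var/lib/kubelet/pods/poduid/volumes/vol", "", []string{"bind", "_netdev"})
	return fake.MountPoints[0].Opts
}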
--- a/pkg/volume/aws_ebs/aws_ebs.go
+++ b/pkg/volume/aws_ebs/aws_ebs.go
@@ -175,9 +175,11 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec,
 			plugin:          plugin,
 			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
 		},
-		fsType:      fsType,
-		readOnly:    readOnly,
-		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		fsType:       fsType,
+		readOnly:     readOnly,
+		diskMounter:  util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mountOptions: util.MountOptionFromSpec(spec),
+	}, nil
 }
 
 func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
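The constructor captures the user's options once, at mounter creation time. util.MountOptionFromSpec itself is not part of this diff; a rough sketch of what it plausibly does for a PV-backed spec (an assumption, not the commit's code — the real helper may also merge extra options and honor a legacy annotation):

package example

import "k8s.io/kubernetes/pkg/volume"

// Assumed behavior: return the backing PersistentVolume's
// spec.mountOptions, or nil for inline volumes that have no PV.
func mountOptionFromSpec(spec *volume.Spec) []string {
	if spec != nil && spec.PersistentVolume != nil {
		return spec.PersistentVolume.Spec.MountOptions
	}
	return nil
}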
@@ -343,7 +345,8 @@ type awsElasticBlockStoreMounter struct {
 	// Specifies whether the disk will be attached as read-only.
 	readOnly bool
 	// diskMounter provides the interface that is used to mount the actual block device.
-	diskMounter *mount.SafeFormatAndMount
+	diskMounter  *mount.SafeFormatAndMount
+	mountOptions []string
 }
 
 var _ volume.Mounter = &awsElasticBlockStoreMounter{}
@@ -392,7 +395,8 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error
 	if b.readOnly {
 		options = append(options, "ro")
 	}
-	err = b.mounter.Mount(globalPDPath, dir, "", options)
+	mountOptions := util.JoinMountOptions(options, b.mountOptions)
+	err = b.mounter.Mount(globalPDPath, dir, "", mountOptions)
 	if err != nil {
 		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
 		if mntErr != nil {
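Worked through: for a read-write PV with MountOptions: ["_netdev"], options is ["bind"] at this point, so the bind mount now runs with ["bind", "_netdev"] under the order-preserving join sketched above — exactly what the TestMountOptions added below expects. For a read-only attach it would be ["bind", "ro", "_netdev"].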
--- a/pkg/volume/aws_ebs/aws_ebs_test.go
+++ b/pkg/volume/aws_ebs/aws_ebs_test.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"reflect"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -325,3 +326,53 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
 		t.Errorf("Volume Unmounter can be type-assert to Mounter")
 	}
 }
+
+func TestMountOptions(t *testing.T) {
+	tmpDir, err := utiltesting.MkTmpdir("aws-ebs")
+	if err != nil {
+		t.Fatalf("can't make a temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+
+	pv := &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pvA",
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
+			},
+			ClaimRef: &v1.ObjectReference{
+				Name: "claimA",
+			},
+			MountOptions: []string{"_netdev"},
+		},
+	}
+
+	fakeManager := &fakePDManager{}
+	fakeMounter := &mount.FakeMounter{}
+
+	mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
+	if err != nil {
+		t.Errorf("Failed to make a new Mounter: %v", err)
+	}
+	if mounter == nil {
+		t.Errorf("Got a nil Mounter")
+	}
+
+	if err := mounter.SetUp(nil); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	mountOptions := fakeMounter.MountPoints[0].Opts
+	expectedMountOptions := []string{"bind", "_netdev"}
+	if !reflect.DeepEqual(mountOptions, expectedMountOptions) {
+		t.Errorf("Expected mount options to be %v got %v", expectedMountOptions, mountOptions)
+	}
+}
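This test can be run in isolation with go test ./pkg/volume/aws_ebs -run TestMountOptions.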
--- a/pkg/volume/cinder/cinder.go
+++ b/pkg/volume/cinder/cinder.go
@@ -148,7 +148,9 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
 		},
 		fsType:             fsType,
 		readOnly:           readOnly,
-		blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mountOptions:       util.MountOptionFromSpec(spec),
+	}, nil
 }
 
 func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@@ -288,6 +290,7 @@ type cinderVolumeMounter struct {
 	fsType             string
 	readOnly           bool
 	blockDeviceMounter *mount.SafeFormatAndMount
+	mountOptions       []string
 }
 
 // cinderPersistentDisk volumes are disk resources provided by C3
@@ -358,8 +361,9 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	mountOptions := util.JoinMountOptions(options, b.mountOptions)
 	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
-	glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, options)
+	glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, mountOptions)
 	err = b.mounter.Mount(globalPDPath, dir, "", options)
 	if err != nil {
 		glog.V(4).Infof("Mount failed: %v", err)
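Note the asymmetry in this hunk: the joined mountOptions are computed and logged, but the Mount call on the following line still passes the un-joined options, so the cinder plugin does not actually apply the user's options here.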
--- a/pkg/volume/fc/disk_manager.go
+++ b/pkg/volume/fc/disk_manager.go
@@ -22,6 +22,7 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util"
 )
 
 // Abstract interface to disk operations.
@@ -57,7 +58,8 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou
 	if b.readOnly {
 		options = append(options, "ro")
 	}
-	err = mounter.Mount(globalPDPath, volPath, "", options)
+	mountOptions := util.JoinMountOptions(options, b.mountOptions)
+	err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
 	if err != nil {
 		glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
 		noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath)
--- a/pkg/volume/fc/fc.go
+++ b/pkg/volume/fc/fc.go
@@ -139,20 +139,22 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
 		}
 		glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode)
 		return &fcDiskMounter{
-			fcDisk:     fcDisk,
-			fsType:     fc.FSType,
-			volumeMode: volumeMode,
-			readOnly:   readOnly,
-			mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
-			deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
+			fcDisk:       fcDisk,
+			fsType:       fc.FSType,
+			volumeMode:   volumeMode,
+			readOnly:     readOnly,
+			mounter:      &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
+			deviceUtil:   util.NewDeviceHandler(util.NewIOHandler()),
+			mountOptions: []string{},
 		}, nil
 	}
 	return &fcDiskMounter{
-		fcDisk:     fcDisk,
-		fsType:     fc.FSType,
-		readOnly:   readOnly,
-		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
-		deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
+		fcDisk:       fcDisk,
+		fsType:       fc.FSType,
+		readOnly:     readOnly,
+		mounter:      &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
+		deviceUtil:   util.NewDeviceHandler(util.NewIOHandler()),
+		mountOptions: util.MountOptionFromSpec(spec),
 	}, nil
 
 }
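The first branch, taken for block-mode volumes, gets an explicitly empty mountOptions, presumably because user mount options only make sense for filesystem mounts and not for raw block devices; only the filesystem branch consults the spec.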
@@ -374,11 +376,12 @@ func (fc *fcDisk) fcPodDeviceMapPath() (string, string) {
 
 type fcDiskMounter struct {
 	*fcDisk
-	readOnly   bool
-	fsType     string
-	volumeMode v1.PersistentVolumeMode
-	mounter    *mount.SafeFormatAndMount
-	deviceUtil util.DeviceUtil
+	readOnly     bool
+	fsType       string
+	volumeMode   v1.PersistentVolumeMode
+	mounter      *mount.SafeFormatAndMount
+	deviceUtil   util.DeviceUtil
+	mountOptions []string
 }
 
 var _ volume.Mounter = &fcDiskMounter{}
--- a/pkg/volume/gce_pd/gce_pd.go
+++ b/pkg/volume/gce_pd/gce_pd.go
@@ -211,7 +211,8 @@ func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, pod
 			plugin:          plugin,
 			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
 		},
-		readOnly: readOnly}, nil
+		mountOptions: util.MountOptionFromSpec(spec),
+		readOnly:     readOnly}, nil
 }
 
 func (plugin *gcePersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@@ -329,7 +330,8 @@ type gcePersistentDisk struct {
 type gcePersistentDiskMounter struct {
 	*gcePersistentDisk
 	// Specifies whether the disk will be mounted as read-only.
-	readOnly bool
+	readOnly     bool
+	mountOptions []string
 }
 
 var _ volume.Mounter = &gcePersistentDiskMounter{}
@@ -381,7 +383,9 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
 	globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
 	glog.V(4).Infof("attempting to mount %s", dir)
 
-	err = b.mounter.Mount(globalPDPath, dir, "", options)
+	mountOptions := util.JoinMountOptions(b.mountOptions, options)
+
+	err = b.mounter.Mount(globalPDPath, dir, "", mountOptions)
 	if err != nil {
 		notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
 		if mntErr != nil {
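Unlike AWS EBS, which joins (options, b.mountOptions), GCE joins with the user's options first. That argument order is why the GCE test below expects ["_netdev", "bind"] while the AWS test expects ["bind", "_netdev"].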
--- a/pkg/volume/gce_pd/gce_pd_test.go
+++ b/pkg/volume/gce_pd/gce_pd_test.go
@@ -241,6 +241,56 @@ func TestPlugin(t *testing.T) {
 	}
 }
 
+func TestMountOptions(t *testing.T) {
+	tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
+	if err != nil {
+		t.Fatalf("can't make a temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+
+	pv := &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pvA",
+		},
+		Spec: v1.PersistentVolumeSpec{
+			PersistentVolumeSource: v1.PersistentVolumeSource{
+				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
+			},
+			ClaimRef: &v1.ObjectReference{
+				Name: "claimA",
+			},
+			MountOptions: []string{"_netdev"},
+		},
+	}
+
+	fakeManager := &fakePDManager{}
+	fakeMounter := &mount.FakeMounter{}
+
+	mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
+	if err != nil {
+		t.Errorf("Failed to make a new Mounter: %v", err)
+	}
+	if mounter == nil {
+		t.Errorf("Got a nil Mounter")
+	}
+
+	if err := mounter.SetUp(nil); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	mountOptions := fakeMounter.MountPoints[0].Opts
+	expectedMountOptions := []string{"_netdev", "bind"}
+	if !reflect.DeepEqual(mountOptions, expectedMountOptions) {
+		t.Errorf("Expected mount options to be %v got %v", expectedMountOptions, mountOptions)
+	}
+}
+
 func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	pv := &v1.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
--- a/pkg/volume/photon_pd/photon_pd.go
+++ b/pkg/volume/photon_pd/photon_pd.go
@@ -114,7 +114,9 @@ func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec,
 			plugin: plugin,
 		},
 		fsType:      fsType,
-		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mountOption: util.MountOptionFromSpec(spec),
+	}, nil
 }
 
 func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {
@@ -177,6 +179,7 @@ type photonPersistentDiskMounter struct {
 	*photonPersistentDisk
 	fsType      string
 	diskMounter *mount.SafeFormatAndMount
+	mountOption []string
 }
 
 func (b *photonPersistentDiskMounter) GetAttributes() volume.Attributes {
@@ -222,7 +225,8 @@ func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error
 	globalPDPath := makeGlobalPDPath(b.plugin.host, b.pdID)
 	glog.V(4).Infof("attempting to mount %s", dir)
 
-	err = b.mounter.Mount(globalPDPath, dir, "", options)
+	mountOptions := util.JoinMountOptions(options, b.mountOption)
+	err = b.mounter.Mount(globalPDPath, dir, "", mountOptions)
 	if err != nil {
 		notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
 		if mntErr != nil {
--- a/pkg/volume/vsphere_volume/vsphere_volume.go
+++ b/pkg/volume/vsphere_volume/vsphere_volume.go
@@ -118,8 +118,10 @@ func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID
 			plugin:          plugin,
 			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
 		},
-		fsType:      fsType,
-		diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
+		fsType:       fsType,
+		diskMounter:  util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
+		mountOptions: util.MountOptionFromSpec(spec),
+	}, nil
 }
 
 func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) {
@@ -186,8 +188,9 @@ var _ volume.Mounter = &vsphereVolumeMounter{}
 
 type vsphereVolumeMounter struct {
 	*vsphereVolume
-	fsType      string
-	diskMounter *mount.SafeFormatAndMount
+	fsType       string
+	diskMounter  *mount.SafeFormatAndMount
+	mountOptions []string
 }
 
 func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes {
@@ -233,7 +236,8 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 
 	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
 	globalPDPath := makeGlobalPDPath(b.plugin.host, b.volPath)
-	err = b.mounter.Mount(globalPDPath, dir, "", options)
+	mountOptions := util.JoinMountOptions(options, b.mountOptions)
+	err = b.mounter.Mount(globalPDPath, dir, "", mountOptions)
 	if err != nil {
 		notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
 		if mntErr != nil {
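On the API side, users opt in to all of this by setting spec.mountOptions on a PersistentVolume (historically also settable through the volume.beta.kubernetes.io/mount-options annotation); the "_netdev" value used in the tests above is a typical example.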