CSI fix for proper fsgroup application to volume

Vladimir Vivien 2018-08-10 17:40:14 -04:00 committed by Vladimir Vivien
parent 94e59f1636
commit e3bc731143
3 changed files with 189 additions and 23 deletions
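For context: a workload opts into this code path by setting securityContext.fsGroup on the pod, which asks the kubelet to apply group ownership to the pod's volumes before containers start. The change below makes the CSI plugin apply that ownership only when an fstype is known, the PV supports ReadWriteOnce, and the volume is not read-only. A hypothetical example using the public core/v1 types (the pod, the "my-csi-pvc" claim name, and the buildPod helper are illustrative, not part of this commit):

	package example

	import v1 "k8s.io/api/core/v1"

	// buildPod returns a pod that exercises the fsGroup path: the kubelet
	// will apply group ownership (GID 3000) to the CSI-backed volume
	// before the pod's containers start.
	func buildPod() *v1.Pod {
		fsGroup := int64(3000)
		return &v1.Pod{
			Spec: v1.PodSpec{
				SecurityContext: &v1.PodSecurityContext{FSGroup: &fsGroup},
				Volumes: []v1.Volume{{
					Name: "data",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "my-csi-pvc", // hypothetical claim bound to a CSI PV
						},
					},
				}},
			},
		}
	}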


@@ -206,31 +206,24 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
-	// apply volume ownership
-	if !c.readOnly && fsGroup != nil {
-		err := volume.SetVolumeOwnership(c, fsGroup)
-		if err != nil {
-			// attempt to rollback mount.
-			glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err))
-			glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID))
-			if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
-				glog.Error(log(
-					"mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)",
-					c.volumeID, unpubErr, err,
-				))
-				return fmt.Errorf("%v (caused by %v)", unpubErr, err)
-			}
-
-			if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
-				glog.Error(log(
-					"mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)",
-					dir, unmountErr, err,
-				))
-				return fmt.Errorf("%v (caused by %v)", unmountErr, err)
-			}
-
-			return err
-		}
-		glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID))
-	}
+	// The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323
+	// if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
+	// if fstype is provided and pv.AccessMode == ReadWriteOnce, then apply fsgroup
+	err = c.applyFSGroup(fsType, fsGroup)
+	if err != nil {
+		// attempt to rollback mount.
+		fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err)
+		if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil {
+			glog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr))
+			return fsGrpErr
+		}
+
+		if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil {
+			glog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr))
+			return fsGrpErr
+		}
+		return fsGrpErr
+	}
 
 	glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
@@ -330,6 +323,43 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
 	return nil
 }
 
+// applyFSGroup applies the volume ownership. It derives its logic
+// from https://github.com/kubernetes/kubernetes/issues/66323
+// 1) if fstype is "", then skip fsgroup (could be indication of non-block filesystem)
+// 2) if fstype is provided and pv.AccessMode == ReadWriteOnce and !c.readOnly, then apply fsgroup
+func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error {
+	if fsGroup != nil {
+		if fsType == "" {
+			glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided"))
+			return nil
+		}
+
+		accessModes := c.spec.PersistentVolume.Spec.AccessModes
+		if accessModes == nil {
+			glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided"))
+			return nil
+		}
+		if !hasReadWriteOnce(accessModes) {
+			glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode"))
+			return nil
+		}
+
+		if c.readOnly {
+			glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly"))
+			return nil
+		}
+
+		err := volume.SetVolumeOwnership(c, fsGroup)
+		if err != nil {
+			return err
+		}
+
+		glog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID))
+	}
+
+	return nil
+}
+
 // isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
 func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
 	mounter := plug.host.GetMounter(plug.GetPluginName())
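For readers unfamiliar with it, volume.SetVolumeOwnership is kubelet's generic helper in pkg/volume for applying an fsGroup; conceptually it is a recursive chown/chmod walk over the volume. The sketch below is a simplified illustration of that idea (applyGroupOwnership is a made-up name; the real helper handles more edge cases):

	package example

	import (
		"os"
		"path/filepath"
	)

	// applyGroupOwnership illustrates what applying an fsGroup amounts to:
	// chown every file in the volume to the group, grant group read/write,
	// and set the setgid bit on directories so new files inherit the group.
	func applyGroupOwnership(root string, fsGroup int64) error {
		return filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if err := os.Lchown(p, -1, int(fsGroup)); err != nil {
				return err
			}
			if info.Mode()&os.ModeSymlink != 0 {
				return nil // don't chmod through symlinks
			}
			perms := info.Mode() | 0060 // group read/write
			if info.IsDir() {
				perms |= os.ModeSetgid // files created later inherit fsGroup
			}
			return os.Chmod(p, perms)
		})
	}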


@@ -262,6 +262,129 @@ func TestMounterSetUp(t *testing.T) {
 		MounterSetUpTests(t, false)
 	})
 }
+
+func TestMounterSetUpWithFSGroup(t *testing.T) {
+	fakeClient := fakeclient.NewSimpleClientset()
+	plug, tmpDir := newTestPlugin(t, fakeClient, nil)
+	defer os.RemoveAll(tmpDir)
+
+	testCases := []struct {
+		name        string
+		accessModes []api.PersistentVolumeAccessMode
+		readOnly    bool
+		fsType      string
+		setFsGroup  bool
+		fsGroup     int64
+	}{
+		{
+			name: "default fstype, with no fsgroup (should not apply fsgroup)",
+			accessModes: []api.PersistentVolumeAccessMode{
+				api.ReadWriteOnce,
+			},
+			readOnly: false,
+			fsType:   "",
+		},
+		{
+			name: "default fstype with fsgroup (should not apply fsgroup)",
+			accessModes: []api.PersistentVolumeAccessMode{
+				api.ReadWriteOnce,
+			},
+			readOnly:   false,
+			fsType:     "",
+			setFsGroup: true,
+			fsGroup:    3000,
+		},
+		{
+			name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
+			accessModes: []api.PersistentVolumeAccessMode{
+				api.ReadWriteMany,
+				api.ReadOnlyMany,
+			},
+			fsType:     "ext4",
+			setFsGroup: true,
+			fsGroup:    3000,
+		},
+		{
+			name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
+			accessModes: []api.PersistentVolumeAccessMode{
+				api.ReadWriteOnce,
+			},
+			readOnly:   true,
+			fsType:     "ext4",
+			setFsGroup: true,
+			fsGroup:    3000,
+		},
+		{
+			name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
+			accessModes: []api.PersistentVolumeAccessMode{
+				api.ReadWriteOnce,
+			},
+			fsType:     "ext4",
+			setFsGroup: true,
+			fsGroup:    3000,
+		},
+	}
+
+	for i, tc := range testCases {
+		t.Logf("Running test %s", tc.name)
+
+		volName := fmt.Sprintf("test-vol-%d", i)
+		pv := makeTestPV("test-pv", 10, testDriver, volName)
+		pv.Spec.AccessModes = tc.accessModes
+		pvName := pv.GetName()
+
+		spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly)
+
+		if tc.fsType != "" {
+			spec.PersistentVolume.Spec.CSI.FSType = tc.fsType
+		}
+
+		mounter, err := plug.NewMounter(
+			spec,
+			&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
+			volume.VolumeOptions{},
+		)
+		if err != nil {
+			t.Fatalf("Failed to make a new Mounter: %v", err)
+		}
+
+		if mounter == nil {
+			t.Fatal("failed to create CSI mounter")
+		}
+
+		csiMounter := mounter.(*csiMountMgr)
+		csiMounter.csiClient = setupClient(t, true)
+
+		attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName()))
+		attachment := makeTestAttachment(attachID, "test-node", pvName)
+
+		_, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment)
+		if err != nil {
+			t.Errorf("failed to setup VolumeAttachment: %v", err)
+			continue
+		}
+
+		// Mounter.SetUp()
+		var fsGroupPtr *int64
+		if tc.setFsGroup {
+			fsGroup := tc.fsGroup
+			fsGroupPtr = &fsGroup
+		}
+		if err := csiMounter.SetUp(fsGroupPtr); err != nil {
+			t.Fatalf("mounter.Setup failed: %v", err)
+		}
+
+		// Test that the default file system type is not overridden
+		if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != len(tc.fsType) {
+			t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
+		}
+
+		// ensure call went all the way
+		pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
+		if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
+			t.Error("csi server may not have received NodePublishVolume call")
+		}
+	}
+}
+
 func TestUnmounterTeardown(t *testing.T) {
 	plug, tmpDir := newTestPlugin(t, nil, nil)

@@ -127,3 +127,16 @@ func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string {
 	sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID)
 	return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data")
 }
+
+// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce
+func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool {
+	if modes == nil {
+		return false
+	}
+	for _, mode := range modes {
+		if mode == api.ReadWriteOnce {
+			return true
+		}
+	}
+	return false
+}
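A quick illustration of the new helper's behavior (hypothetical snippet, assuming the file's api alias for the core API package):

	modes := []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce}
	hasReadWriteOnce(modes)                                               // true
	hasReadWriteOnce([]api.PersistentVolumeAccessMode{api.ReadWriteMany}) // false
	hasReadWriteOnce(nil)                                                 // false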