Merge pull request #46458 from jsafrane/mount-prep

Automatic merge from submit-queue (batch tested with PRs 46458, 50934, 50766, 50970, 47698)

Prepare VolumeHost for running mount tools in containers

This is the first part of the implementation of https://github.com/kubernetes/features/issues/278 - running mount utilities in containers.

It updates the `VolumeHost` interface:

* `GetMounter()` now requires the volume plugin name, as it will return a different mounter for different volume plugins, because the mount utilities for these plugins can live in different places.
* A new `GetExec()` method that volume plugins should use to execute any utilities. The new `Exec` interface runs them in the proper place.
* `SafeFormatAndMount` is updated to use the new `Exec` interface.

This is just preparation: for now, `GetExec` returns a simple `os/exec`-based implementation (`mount.NewOsExec`), so mount utilities are executed in the same place as before. The volume plugins themselves will be updated in subsequent PRs (split into separate PRs, as some plugins required a lot of changes).
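
As a rough illustration (not code from this PR; the plugin name and helper function below are made up), a volume plugin is expected to build its `SafeFormatAndMount` from the host like this:

```go
package mypluginexample // illustrative package, not part of this PR

import (
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
)

// newDiskMounter is a sketch of the intended call pattern only.
func newDiskMounter(host volume.VolumeHost) *mount.SafeFormatAndMount {
	pluginName := "kubernetes.io/myplugin" // illustrative plugin name
	return &mount.SafeFormatAndMount{
		// The mounter can differ per plugin, depending on where the plugin's
		// mount utilities live (host, kubelet's container, or a remote pod).
		Interface: host.GetMounter(pluginName),
		// Exec runs mount utilities in the proper place; with this PR it is
		// still a plain os/exec wrapper (mount.NewOsExec), so behavior is unchanged.
		Exec: host.GetExec(pluginName),
	}
}
```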

```release-note
NONE
```

@kubernetes/sig-storage-pr-reviews 
@rootfs @gnufied
Kubernetes Submit Queue 2017-08-21 18:11:16 -07:00 committed by GitHub
commit 198e83588b
58 changed files with 240 additions and 180 deletions

View File

@ -539,7 +539,7 @@ func (adc *attachDetachController) GetCloudProvider() cloudprovider.Interface {
return adc.cloud
}
func (adc *attachDetachController) GetMounter() mount.Interface {
func (adc *attachDetachController) GetMounter(pluginName string) mount.Interface {
return nil
}
@ -571,6 +571,10 @@ func (adc *attachDetachController) GetConfigMapFunc() func(namespace, name strin
}
}
func (adc *attachDetachController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
func (adc *attachDetachController) addNodeToDswp(node *v1.Node, nodeName types.NodeName) {
if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists {
keepTerminatedPodVolumes := false

View File

@ -61,7 +61,7 @@ func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interfa
return ctrl.cloud
}
func (ctrl *PersistentVolumeController) GetMounter() mount.Interface {
func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Interface {
return nil
}
@ -93,6 +93,10 @@ func (adc *PersistentVolumeController) GetConfigMapFunc() func(namespace, name s
}
}
func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, error) {
return nil, fmt.Errorf("GetNodeLabels() unsupported in PersistentVolumeController")
}

View File

@ -117,7 +117,7 @@ func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface {
return kvh.kubelet.cloud
}
func (kvh *kubeletVolumeHost) GetMounter() mount.Interface {
func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface {
return kvh.kubelet.mounter
}
@ -156,3 +156,7 @@ func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
}
return node.Labels, nil
}
func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}

View File

@ -10,6 +10,7 @@ go_library(
name = "go_default_library",
srcs = [
"doc.go",
"exec.go",
"fake.go",
"mount.go",
"mount_unsupported.go",
@ -45,10 +46,7 @@ go_test(
"//conditions:default": [],
}),
library = ":go_default_library",
deps = [
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
deps = ["//vendor/k8s.io/utils/exec/testing:go_default_library"],
)
filegroup(

pkg/util/mount/exec.go (new file, 50 lines)
View File

@ -0,0 +1,50 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import "k8s.io/utils/exec"
func NewOsExec() Exec {
return &osExec{}
}
// Real implementation of Exec interface that uses simple util.Exec
type osExec struct{}
var _ Exec = &osExec{}
func (e *osExec) Run(cmd string, args ...string) ([]byte, error) {
exe := exec.New()
return exe.Command(cmd, args...).CombinedOutput()
}
func NewFakeExec(run runHook) *FakeExec {
return &FakeExec{runHook: run}
}
// Fake for testing.
type FakeExec struct {
runHook runHook
}
type runHook func(cmd string, args ...string) ([]byte, error)
func (f *FakeExec) Run(cmd string, args ...string) ([]byte, error) {
if f.runHook != nil {
return f.runHook(cmd, args...)
}
return nil, nil
}
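
For reference, `FakeExec` is driven by a single callback that scripts the output of each command the code under test runs. A minimal, hypothetical usage sketch (not part of the file above; the test name and scripted values are made up):

```go
package mypluginexample_test // illustrative

import (
	"testing"

	"k8s.io/kubernetes/pkg/util/mount"
)

// TestFakeExecSketch shows the intended wiring, not a real test from this PR.
func TestFakeExecSketch(t *testing.T) {
	fakeExec := mount.NewFakeExec(func(cmd string, args ...string) ([]byte, error) {
		// Script the output of whatever utility the code under test runs.
		if cmd == "lsblk" {
			return []byte("ext4\n"), nil
		}
		return nil, nil
	})
	out, err := fakeExec.Run("lsblk", "-n", "-o", "FSTYPE", "/dev/foo")
	if err != nil || string(out) != "ext4\n" {
		t.Errorf("unexpected result: %q, %v", out, err)
	}
}
```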

View File

@ -25,7 +25,6 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/utils/exec"
)
const (
@ -70,6 +69,16 @@ type Interface interface {
GetDeviceNameFromMount(mountPath, pluginDir string) (string, error)
}
// Exec executes command where mount utilities are. This can be either the host,
// container where kubelet runs or even a remote pod with mount utilities.
// Usual pkg/util/exec interface is not used because kubelet.RunInContainer does
// not provide stdin/stdout/stderr streams.
type Exec interface {
// Run executes a command and returns its stdout + stderr combined in one
// stream.
Run(cmd string, args ...string) ([]byte, error)
}
// Compile-time check to ensure all Mounter implementations satisfy
// the mount interface
var _ Interface = &Mounter{}
@ -89,7 +98,7 @@ type MountPoint struct {
// mounts it otherwise the device is formatted first then mounted.
type SafeFormatAndMount struct {
Interface
Runner exec.Interface
Exec
}
// FormatAndMount formats the given disk, if needed, and mounts it.

View File

@ -412,8 +412,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
// Run fsck on the disk to fix repairable issues
glog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
args := []string{"-a", source}
cmd := mounter.Runner.Command("fsck", args...)
out, err := cmd.CombinedOutput()
out, err := mounter.Exec.Run("fsck", args...)
if err != nil {
ee, isExitError := err.(utilexec.ExitError)
switch {
@ -450,8 +449,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
args = []string{"-F", source}
}
glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
cmd := mounter.Runner.Command("mkfs."+fstype, args...)
_, err := cmd.CombinedOutput()
_, err := mounter.Exec.Run("mkfs."+fstype, args...)
if err == nil {
// the disk has been formatted successfully try to mount it again.
glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
@ -476,9 +474,8 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
// diskLooksUnformatted uses 'lsblk' to see if the given disk is unformated
func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) {
args := []string{"-n", "-o", "FSTYPE", disk}
cmd := mounter.Runner.Command("lsblk", args...)
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := cmd.CombinedOutput()
dataOut, err := mounter.Exec.Run("lsblk", args...)
output := string(dataOut)
glog.V(4).Infof("Output: %q", output)

View File

@ -21,7 +21,6 @@ import (
"runtime"
"testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
@ -181,40 +180,30 @@ func TestSafeFormatAndMount(t *testing.T) {
}
for _, test := range tests {
commandScripts := []fakeexec.FakeCommandAction{}
for _, expected := range test.execScripts {
ecmd := expected.command
eargs := expected.args
output := expected.output
err := expected.err
commandScript := func(cmd string, args ...string) exec.Cmd {
if cmd != ecmd {
t.Errorf("Unexpected command %s. Expecting %s", cmd, ecmd)
}
for j := range args {
if args[j] != eargs[j] {
t.Errorf("Unexpected args %v. Expecting %v", args, eargs)
}
}
fake := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) { return []byte(output), err },
},
}
return fakeexec.InitFakeCmd(&fake, cmd, args...)
execCallCount := 0
execCallback := func(cmd string, args ...string) ([]byte, error) {
if len(test.execScripts) <= execCallCount {
t.Errorf("Unexpected command: %s %v", cmd, args)
return nil, nil
}
commandScripts = append(commandScripts, commandScript)
}
fake := fakeexec.FakeExec{
CommandScript: commandScripts,
script := test.execScripts[execCallCount]
execCallCount++
if script.command != cmd {
t.Errorf("Unexpected command %s. Expecting %s", cmd, script.command)
}
for j := range args {
if args[j] != script.args[j] {
t.Errorf("Unexpected args %v. Expecting %v", args, script.args)
}
}
return []byte(script.output), script.err
}
fakeMounter := ErrorMounter{&FakeMounter{}, 0, test.mountErrs}
fakeExec := NewFakeExec(execCallback)
mounter := SafeFormatAndMount{
Interface: &fakeMounter,
Runner: &fake,
Exec: fakeExec,
}
device := "/dev/foo"

View File

@ -27,7 +27,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type awsElasticBlockStoreAttacher struct {
@ -54,7 +54,7 @@ func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error)
}
func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
@ -199,7 +199,7 @@ func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
// FIXME: this method can be further pruned.
func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.host.GetMounter(awsElasticBlockStorePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
@ -222,7 +222,7 @@ func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, dev
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
@ -247,7 +247,7 @@ func (plugin *awsElasticBlockStorePlugin) NewDetacher() (volume.Detacher, error)
}
return &awsElasticBlockStoreDetacher{
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
awsVolumes: awsCloud,
}, nil
}
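
Note: the diff above (and several files below) calls `volumehelper.NewSafeFormatAndMountFromHost`, whose definition is not included in this section. Judging from its call sites, it presumably amounts to something like the following sketch:

```go
package volumehelper

import (
	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
)

// Presumed shape of the helper (its actual definition is not shown here):
// it bundles the per-plugin mounter and exec obtained from the VolumeHost.
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
	return &mount.SafeFormatAndMount{
		Interface: host.GetMounter(pluginName),
		Exec:      host.GetExec(pluginName),
	}
}
```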

View File

@ -35,7 +35,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
// This is the primary entrypoint for volume plugins.
@ -104,7 +103,7 @@ func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []v1.PersistentVolume
func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
@ -135,12 +134,12 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec,
},
fsType: fsType,
readOnly: readOnly,
diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -199,7 +198,7 @@ func getVolumeSource(
}
func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
volumeID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {

View File

@ -24,6 +24,7 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",

View File

@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
@ -211,7 +212,7 @@ func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error
}
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.plugin.host.GetMounter()
mounter := attacher.plugin.host.GetMounter(azureDataDiskPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
@ -232,7 +233,7 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str
options := []string{}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
if err != nil {
@ -277,7 +278,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro
// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter())
err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName()))
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {

View File

@ -190,7 +190,7 @@ func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID
}
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
@ -210,6 +210,6 @@ func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath str
}
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
m := plugin.host.GetMounter()
m := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(m, deviceMountPath)
}

View File

@ -63,7 +63,7 @@ func (m *azureDiskMounter) GetPath() string {
}
func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
mounter := m.plugin.host.GetMounter()
mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
volumeSource, err := getVolumeSource(m.spec)
if err != nil {
@ -154,7 +154,7 @@ func (u *azureDiskUnmounter) TearDownAt(dir string) error {
}
glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
mounter := u.plugin.host.GetMounter()
mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName())
mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)

View File

@ -98,7 +98,7 @@ func (plugin *azureFilePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode
}
func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
@ -124,7 +124,7 @@ func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod
}
func (plugin *azureFilePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -110,7 +110,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
glog.V(4).Infof("found ceph secret info: %s", name)
}
}
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(), secret)
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret)
}
func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
@ -153,7 +153,7 @@ func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
}
func (plugin *cephfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type cinderDiskAttacher struct {
@ -66,7 +66,7 @@ func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
}
func (plugin *cinderPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
@ -262,7 +262,7 @@ func (attacher *cinderDiskAttacher) GetDeviceMountPath(
// FIXME: this method can be further pruned.
func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.host.GetMounter(cinderVolumePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
@ -285,7 +285,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
@ -309,7 +309,7 @@ func (plugin *cinderPlugin) NewDetacher() (volume.Detacher, error) {
return nil, err
}
return &cinderDiskDetacher{
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
cinderProvider: cinder,
}, nil
}

View File

@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
// This is the primary entrypoint for volume plugins.
@ -116,7 +115,7 @@ func (plugin *cinderPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
}
func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
@ -139,11 +138,11 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U
},
fsType: fsType,
readOnly: readOnly,
blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
blockDeviceMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -216,7 +215,7 @@ func (plugin *cinderPlugin) getCloudProvider() (CinderProvider, error) {
}
func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {

View File

@ -92,7 +92,7 @@ func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts v
spec.Name(),
pod.UID,
plugin,
plugin.host.GetMounter(),
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},
@ -109,7 +109,7 @@ func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (v
volName,
podUID,
plugin,
plugin.host.GetMounter(),
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.MetricsNil{},
},

View File

@ -99,7 +99,7 @@ func (plugin *emptyDirPlugin) SupportsBulkVolumeVerification() bool {
}
func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}, opts)
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()), &realMountDetector{plugin.host.GetMounter(plugin.GetPluginName())}, opts)
}
func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
@ -120,7 +120,7 @@ func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod,
func (plugin *emptyDirPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()})
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()), &realMountDetector{plugin.host.GetMounter(plugin.GetPluginName())})
}
func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Unmounter, error) {

View File

@ -20,6 +20,7 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",

View File

@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
@ -50,7 +51,7 @@ func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
}
func (plugin *fcPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
@ -88,7 +89,7 @@ func (attacher *fcAttacher) GetDeviceMountPath(
}
func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.host.GetMounter(fcPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
@ -111,7 +112,7 @@ func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, de
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)}
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
@ -132,7 +133,7 @@ var _ volume.Detacher = &fcDetacher{}
func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
return &fcDetacher{
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
manager: &FCUtil{},
exe: exec.New(),
}, nil
@ -192,7 +193,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun
},
fsType: fc.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(), Runner: exec.New()},
mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host),
}, nil
}

View File

@ -103,10 +103,10 @@ func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
fc, readOnly, err := getVolumeSource(spec)
@ -138,13 +138,13 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
plugin: plugin},
fsType: fc.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
}, nil
}
func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -141,7 +141,8 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
@ -210,7 +211,8 @@ func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err == nil {
t.Errorf("Error failed to make a new Mounter is expected: %v", err)
}

View File

@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
)
type attacherDefaults flexVolumeAttacher
@ -59,7 +58,7 @@ func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, dev
options = append(options, "rw")
}
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: a.plugin.host.GetExec(a.plugin.GetPluginName())}
return diskMounter.FormatAndMount(devicePath, deviceMountPath, volSource.FSType, options)
}

View File

@ -69,7 +69,7 @@ func (a *flexVolumeAttacher) GetDeviceMountPath(spec *volume.Spec) (string, erro
// MountDevice is part of the volume.Attacher interface
func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
// Mount only once.
alreadyMounted, err := prepareForMount(a.plugin.host.GetMounter(), deviceMountPath)
alreadyMounted, err := prepareForMount(a.plugin.host.GetMounter(a.plugin.GetPluginName()), deviceMountPath)
if err != nil {
return err
}
@ -87,7 +87,7 @@ func (a *flexVolumeAttacher) MountDevice(spec *volume.Spec, devicePath string, d
// Devicepath is empty if the plugin does not support attach calls. Ignore mountDevice calls if the
// plugin does not implement attach interface.
if devicePath != "" {
return (*attacherDefaults)(a).MountDevice(spec, devicePath, deviceMountPath, a.plugin.host.GetMounter())
return (*attacherDefaults)(a).MountDevice(spec, devicePath, deviceMountPath, a.plugin.host.GetMounter(a.plugin.GetPluginName()))
} else {
return nil
}

View File

@ -41,5 +41,5 @@ func (d *detacherDefaults) WaitForDetach(devicePath string, timeout time.Duratio
// UnmountDevice is part of the volume.Detacher interface.
func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error {
glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath)
return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter())
return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()))
}

View File

@ -69,7 +69,7 @@ func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error {
return nil
}
notmnt, err := isNotMounted(d.plugin.host.GetMounter(), deviceMountPath)
notmnt, err := isNotMounted(d.plugin.host.GetMounter(d.plugin.GetPluginName()), deviceMountPath)
if err != nil {
return err
}

View File

@ -19,7 +19,6 @@ package flexvolume
import (
"strconv"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/exec"
)
@ -29,9 +28,6 @@ type flexVolumeMounter struct {
*flexVolume
// Runner used to setup the volume.
runner exec.Interface
// blockDeviceMounter provides the interface to create filesystem if the
// filesystem doesn't exist.
blockDeviceMounter mount.Interface
// the considered volume spec
spec *volume.Spec
readOnly bool

View File

@ -158,7 +158,7 @@ func (plugin *flexVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMod
// NewMounter is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(), plugin.runner)
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()), plugin.runner)
}
// newMounterInternal is the internal mounter routine to build the volume.
@ -176,16 +176,15 @@ func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.P
podServiceAccountName: pod.Spec.ServiceAccountName,
volName: spec.Name(),
},
runner: runner,
spec: spec,
readOnly: readOnly,
blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: runner},
runner: runner,
spec: spec,
readOnly: readOnly,
}, nil
}
// NewUnmounter is part of the volume.VolumePlugin interface.
func (plugin *flexVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(), plugin.runner)
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()), plugin.runner)
}
// newUnmounterInternal is the internal unmounter routine to clean the volume.
@ -254,7 +253,7 @@ func (plugin *flexVolumePlugin) isUnsupported(command string) bool {
}
func (plugin *flexVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}

View File

@ -138,7 +138,7 @@ func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*v1.FlockerVo
func (plugin *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &FlockerUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &FlockerUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) {
@ -166,7 +166,7 @@ func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.
func (p *flockerPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return p.newUnmounterInternal(volName, podUID, &FlockerUtil{}, p.host.GetMounter())
return p.newUnmounterInternal(volName, podUID, &FlockerUtil{}, p.host.GetMounter(p.GetPluginName()))
}
func (p *flockerPlugin) newUnmounterInternal(volName string, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type gcePersistentDiskAttacher struct {
@ -56,7 +56,7 @@ func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
}
func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
@ -185,7 +185,7 @@ func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath(
func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
// Only mount the PD globally once.
mounter := attacher.host.GetMounter()
mounter := attacher.host.GetMounter(gcePersistentDiskPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
@ -208,7 +208,7 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
@ -272,5 +272,5 @@ func (detacher *gcePersistentDiskDetacher) Detach(deviceMountPath string, nodeNa
}
func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error {
return volumeutil.UnmountPath(deviceMountPath, detacher.host.GetMounter())
return volumeutil.UnmountPath(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName))
}

View File

@ -100,7 +100,7 @@ func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAcc
func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func getVolumeSource(
@ -145,7 +145,7 @@ func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, pod
func (plugin *gcePersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &GCEDiskUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -191,7 +191,7 @@ func (plugin *gcePersistentDiskPlugin) newProvisionerInternal(options volume.Vol
}
func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {

View File

@ -152,7 +152,7 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
return nil, err
}
glog.V(1).Infof("glusterfs: endpoints %v", ep)
return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(), exec.New())
return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()), exec.New())
}
func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool) {
@ -182,7 +182,7 @@ func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endp
}
func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -112,10 +112,10 @@ func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.V
}
}
return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(), secret)
return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()), secret)
}
func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret map[string]string) (volume.Mounter, error) {
func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec, secret map[string]string) (volume.Mounter, error) {
// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
iscsi, readOnly, err := getVolumeSource(spec)
@ -147,7 +147,7 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI
plugin: plugin},
fsType: iscsi.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
mountOptions: volume.MountOptionFromSpec(spec),
}, nil
@ -155,10 +155,10 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI
func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Unmounter, error) {
return &iscsiDiskUnmounter{
iscsiDisk: &iscsiDisk{
podUID: podUID,

View File

@ -141,7 +141,8 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, nil)
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec, nil)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
@ -178,7 +179,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
fakeManager2 := NewFakeDiskManager()
defer fakeManager2.Cleanup()
unmounter, err := plug.(*iscsiPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
unmounter, err := plug.(*iscsiPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter, fakeExec)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}

View File

@ -104,7 +104,7 @@ func (plugin *localVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ vo
localVolume: &localVolume{
podUID: pod.UID,
volName: spec.Name(),
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
plugin: plugin,
globalPath: volumeSource.Path,
MetricsProvider: volume.NewMetricsStatFS(volumeSource.Path),
@ -119,7 +119,7 @@ func (plugin *localVolumePlugin) NewUnmounter(volName string, podUID types.UID)
localVolume: &localVolume{
podUID: podUID,
volName: volName,
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
plugin: plugin,
},
}, nil

View File

@ -105,7 +105,7 @@ func (plugin *nfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
}
func (plugin *nfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
@ -129,7 +129,7 @@ func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, moun
}
func (plugin *nfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -26,7 +26,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type photonPersistentDiskAttacher struct {
@ -180,13 +180,13 @@ func (attacher *photonPersistentDiskAttacher) GetDeviceMountPath(spec *volume.Sp
// GetMountDeviceRefs finds all other references to the device referenced
// by deviceMountPath; returns a list of paths.
func (plugin *photonPersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
// MountDevice mounts device to global mount point.
func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.host.GetMounter(photonPersistentDiskPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
@ -209,7 +209,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev
options := []string{}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(photonPersistentDiskPluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
@ -236,7 +236,7 @@ func (plugin *photonPersistentDiskPlugin) NewDetacher() (volume.Detacher, error)
}
return &photonPersistentDiskDetacher{
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
photonDisks: photonCloud,
}, nil
}

View File

@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
// This is the primary entrypoint for volume plugins.
@ -89,11 +88,11 @@ func (plugin *photonPersistentDiskPlugin) SupportsBulkVolumeVerification() bool
}
func (plugin *photonPersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &PhotonDiskUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &PhotonDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *photonPersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &PhotonDiskUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &PhotonDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
@ -116,7 +115,7 @@ func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec,
plugin: plugin,
},
fsType: fsType,
diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -131,7 +130,7 @@ func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, p
}
func (plugin *photonPersistentDiskPlugin) ConstructVolumeSpec(volumeSpecName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
pdID, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {

View File

@ -224,7 +224,7 @@ type VolumeHost interface {
GetCloudProvider() cloudprovider.Interface
// Get mounter interface.
GetMounter() mount.Interface
GetMounter(pluginName string) mount.Interface
// Get writer interface for writing data to disk.
GetWriter() io.Writer
@ -244,6 +244,9 @@ type VolumeHost interface {
// Returns a function that returns a configmap.
GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error)
// Returns an interface that should be used to execute any utilities in volume plugins
GetExec(pluginName string) mount.Exec
// Returns the labels on the node
GetNodeLabels() (map[string]string, error)
}

View File

@ -43,7 +43,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@ -29,7 +29,6 @@ import (
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
// This is the primary entrypoint for volume plugins.
@ -91,7 +90,7 @@ func (plugin *portworxVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccess
}
func (plugin *portworxVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, plugin.util, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, plugin.util, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *portworxVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager portworxManager, mounter mount.Interface) (volume.Mounter, error) {
@ -115,11 +114,11 @@ func (plugin *portworxVolumePlugin) newMounterInternal(spec *volume.Spec, podUID
},
fsType: fsType,
readOnly: readOnly,
diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *portworxVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.util, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, plugin.util, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *portworxVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager portworxManager,

View File

@ -92,7 +92,7 @@ func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool {
}
// If Quobyte is already mounted we don't need to check if the binary is installed
if mounter, err := plugin.newMounterInternal(spec, nil, plugin.host.GetMounter()); err == nil {
if mounter, err := plugin.newMounterInternal(spec, nil, plugin.host.GetMounter(plugin.GetPluginName())); err == nil {
qm, _ := mounter.(*quobyteMounter)
pluginDir := plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName))
if mounted, err := qm.pluginDirIsMounted(pluginDir); mounted && err == nil {
@ -155,7 +155,7 @@ func (plugin *quobytePlugin) ConstructVolumeSpec(volumeName, mountPath string) (
}
func (plugin *quobytePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
@ -181,7 +181,7 @@ func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod,
}
func (plugin *quobytePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *quobytePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {

View File

@ -131,7 +131,7 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.Vol
}
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(), secret)
return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), secret)
}
func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*v1.RBDVolumeSource, bool) {
@ -158,7 +158,7 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
Pool: pool,
ReadOnly: readOnly,
manager: manager,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
@ -173,7 +173,7 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID,
func (plugin *rbdPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &RBDUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &RBDUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -183,7 +183,7 @@ func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID,
podUID: podUID,
volName: volName,
manager: manager,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
},

View File

@ -52,7 +52,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@ -53,7 +53,7 @@ var _ volume.VolumePlugin = &sioPlugin{}
func (p *sioPlugin) Init(host volume.VolumeHost) error {
p.host = host
p.mounter = host.GetMounter()
p.mounter = host.GetMounter(p.GetPluginName())
p.volumeMtx = keymutex.NewKeyMutex()
return nil
}

View File

@ -34,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
type sioVolume struct {
@ -143,10 +142,7 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error {
}
glog.V(4).Info(log("setup created mount point directory %s", dir))
diskMounter := &mount.SafeFormatAndMount{
Interface: v.plugin.mounter,
Runner: exec.New(),
}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host)
err = diskMounter.FormatAndMount(devicePath, dir, v.fsType, options)
if err != nil {

View File

@ -99,7 +99,7 @@ func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volu
spec.Name(),
pod.UID,
plugin,
plugin.host.GetMounter(),
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.NewCachedMetrics(volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host))),
},
@ -116,7 +116,7 @@ func (plugin *secretPlugin) NewUnmounter(volName string, podUID types.UID) (volu
volName,
podUID,
plugin,
plugin.host.GetMounter(),
plugin.host.GetMounter(plugin.GetPluginName()),
plugin.host.GetWriter(),
volume.NewCachedMetrics(volume.NewMetricsDu(getPath(podUID, volName, plugin.host))),
},

View File

@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
@ -111,7 +110,7 @@ func (plugin *storageosPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
return nil, err
}
return plugin.newMounterInternal(spec, pod, apiCfg, &storageosUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod, apiCfg, &storageosUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, apiCfg *storageosAPIConfig, manager storageosManager, mounter mount.Interface) (volume.Mounter, error) {
@ -137,12 +136,12 @@ func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod
MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, volNamespace, volName, spec.Name(), plugin.host)),
},
devicePath: storageosDevicePath,
diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: plugin.host.GetExec(plugin.GetPluginName())},
}, nil
}
func (plugin *storageosPlugin) NewUnmounter(pvName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(pvName, podUID, &storageosUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(pvName, podUID, &storageosUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *storageosPlugin) newUnmounterInternal(pvName string, podUID types.UID, manager storageosManager, mounter mount.Interface) (volume.Unmounter, error) {
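
StorageOS builds its `SafeFormatAndMount` literal directly from the host's mounter and exec, while other plugins in this diff use a helper added to `volumehelper` further below; the two forms are interchangeable. A hedged side-by-side, with `pluginName` and `host` as placeholders:

```go
// Direct construction, as in the StorageOS hunk above.
direct := &mount.SafeFormatAndMount{
	Interface: host.GetMounter(pluginName),
	Exec:      host.GetExec(pluginName),
}
// The same pair, built by the convenience helper added later in this diff.
viaHelper := volumehelper.NewSafeFormatAndMountFromHost(pluginName, host)
_, _ = direct, viaHelper
```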

View File

@ -49,6 +49,7 @@ type fakeVolumeHost struct {
pluginMgr VolumePluginMgr
cloud cloudprovider.Interface
mounter mount.Interface
exec mount.Exec
writer io.Writer
nodeLabels map[string]string
}
@ -71,6 +72,7 @@ func newFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins [
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud}
host.mounter = &mount.FakeMounter{}
host.writer = &io.StdWriter{}
host.exec = mount.NewFakeExec(nil)
host.pluginMgr.InitPlugins(plugins, host)
return host
}
@ -95,7 +97,7 @@ func (f *fakeVolumeHost) GetCloudProvider() cloudprovider.Interface {
return f.cloud
}
func (f *fakeVolumeHost) GetMounter() mount.Interface {
func (f *fakeVolumeHost) GetMounter(pluginName string) mount.Interface {
return f.mounter
}
@ -149,6 +151,10 @@ func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secre
}
}
func (f *fakeVolumeHost) GetExec(pluginName string) mount.Exec {
return f.exec
}
func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return func(namespace, name string) (*v1.ConfigMap, error) {
return f.kubeClient.Core().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
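
Because the fake host now carries an `exec` field, plugin unit tests get a `FakeExec` instead of a real command runner and never shell out. A hedged sketch of stubbing a utility call; the hook signature and the `Run` method are assumptions inferred from `NewFakeExec(nil)` above, not shown in this diff:

```go
// Assumed: NewFakeExec accepts a hook with the same shape as Exec.Run.
fakeExec := mount.NewFakeExec(func(cmd string, args ...string) ([]byte, error) {
	// Return canned output instead of actually running mkfs/mount.
	return []byte(cmd + ": ok"), nil
})
out, _ := fakeExec.Run("mkfs.ext4", "/dev/fake")
_ = out // "mkfs.ext4: ok" under this stub; no real command is executed
```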

View File

@ -9,6 +9,7 @@ go_library(
name = "go_default_library",
srcs = ["volumehelper.go"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",

View File

@ -23,6 +23,7 @@ import (
"strings"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@ -127,3 +128,11 @@ func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
return pluginName, components[2], nil
}
// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with the Mounter
// and Exec taken from the given VolumeHost.
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
mounter := host.GetMounter(pluginName)
exec := host.GetExec(pluginName)
return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}
}
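
A hedged usage sketch for the helper above, matching the `FormatAndMount(device, target, fstype, options)` call sites elsewhere in this diff; the function name and its arguments are placeholders:

```go
// Sketch only: the helper bundles the host's per-plugin mounter and exec.
func formatAndMountDisk(host volume.VolumeHost, pluginName, device, target, fstype string) error {
	diskMounter := volumehelper.NewSafeFormatAndMountFromHost(pluginName, host)
	// mkfs/mount now run through diskMounter.Exec, so they execute wherever
	// GetExec decides this plugin's utilities should run.
	return diskMounter.FormatAndMount(device, target, fstype, []string{})
}
```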

View File

@ -28,7 +28,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

View File

@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type vsphereVMDKAttacher struct {
@ -167,13 +167,13 @@ func (attacher *vsphereVMDKAttacher) GetDeviceMountPath(spec *volume.Spec) (stri
// GetMountDeviceRefs finds all other references to the device referenced
// by deviceMountPath; returns a list of paths.
func (plugin *vsphereVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
// MountDevice mounts device to global mount point.
func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.host.GetMounter(vsphereVolumePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
@ -195,7 +195,7 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s
options := []string{}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
@ -221,7 +221,7 @@ func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) {
}
return &vsphereVMDKDetacher{
mounter: plugin.host.GetMounter(),
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
vsphereVolumes: vsphereCloud,
}, nil
}
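
For reference, the `MountDevice` hunk above follows the usual attacher flow: probe the global mount path, create it if missing, and format-and-mount only when nothing is mounted there yet. A hedged recap with placeholder variables, not the literal vSphere code (the 0750 mode and error-handling details are assumptions):

```go
// Sketch of the idempotent mount-device flow around FormatAndMount.
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
	if os.IsNotExist(err) {
		if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
			return err
		}
		notMnt = true
	} else {
		return err
	}
}
if notMnt {
	diskMounter := volumehelper.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
	if err := diskMounter.FormatAndMount(devicePath, deviceMountPath, fsType, mountOptions); err != nil {
		return err
	}
}
```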

View File

@ -32,7 +32,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/utils/exec"
)
// This is the primary entrypoint for volume plugins.
@ -90,11 +89,11 @@ func (plugin *vsphereVolumePlugin) SupportsBulkVolumeVerification() bool {
}
func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter())
return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &VsphereDiskUtil{}, plugin.host.GetMounter())
return plugin.newUnmounterInternal(volName, podUID, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Mounter, error) {
@ -116,7 +115,7 @@ func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID
plugin: plugin,
},
fsType: fsType,
diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) {
@ -131,7 +130,7 @@ func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID t
}
func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
volumePath, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {