mirror of https://github.com/k3s-io/k3s
Merge pull request #69556 from leakingtapan/golint-fix-volume-fc-flock
Fix golint issues for pkg/volume/{fc,flocker} packages
commit 296ea67c84
@@ -411,8 +411,6 @@ pkg/volume/cephfs
 pkg/volume/configmap
 pkg/volume/csi/fake
 pkg/volume/empty_dir
-pkg/volume/fc
-pkg/volume/flocker
 pkg/volume/git_repo
 pkg/volume/host_path
 pkg/volume/iscsi
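This hunk edits the lint exception list (presumably hack/.golint_failures in upstream Kubernetes): removing pkg/volume/fc and pkg/volume/flocker declares both packages golint-clean, so any regression will fail verification. As a minimal sketch of what that check amounts to — assuming golang.org/x/lint is available and using a placeholder file path — golint can also be driven programmatically:

package main

import (
	"fmt"
	"io/ioutil"

	"golang.org/x/lint"
)

func main() {
	// Placeholder path; point this at any file in pkg/volume/fc.
	const file = "pkg/volume/fc/fc_util.go"
	src, err := ioutil.ReadFile(file)
	if err != nil {
		panic(err)
	}
	problems, err := new(lint.Linter).Lint(file, src)
	if err != nil {
		panic(err)
	}
	for _, p := range problems {
		// e.g. "exported type FCUtil should have comment or be unexported"
		fmt.Printf("%s: %s\n", p.Position, p.Text)
	}
}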
@@ -49,7 +49,7 @@ var _ volume.DeviceMountableVolumePlugin = &fcPlugin{}
 
 func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
     return &fcAttacher{
         host:    plugin.host,
-        manager: &FCUtil{},
+        manager: &fcUtil{},
     }, nil
 }
@@ -142,7 +142,7 @@ var _ volume.DeviceUnmounter = &fcDetacher{}
 
 func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
     return &fcDetacher{
         mounter: plugin.host.GetMounter(plugin.GetPluginName()),
-        manager: &FCUtil{},
+        manager: &fcUtil{},
     }, nil
 }
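Both constructors now take &fcUtil{}. The driving golint rule is that every exported identifier needs a doc comment ("exported type FCUtil should have comment or be unexported"); FCUtil had none and was only used inside the package, so unexporting it is the fix with the smallest API surface. A minimal sketch of the after state (not the actual plugin code):

package fc

// fcUtil was previously the exported FCUtil. Unexporting it satisfies
// golint without adding a doc comment, and keeps an internal helper
// out of the package's public API.
type fcUtil struct{}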
@@ -35,7 +35,7 @@ import (
     "k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 )
 
-// This is the primary entrypoint for volume plugins.
+// ProbeVolumePlugins is the primary entrypoint for volume plugins.
 func ProbeVolumePlugins() []volume.VolumePlugin {
     return []volume.VolumePlugin{&fcPlugin{nil}}
 }
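ProbeVolumePlugins must stay exported (the plugin registry calls it from outside the package), so here the fix is the other branch of the same rule: golint requires the doc comment on an exported function to begin with the function's own name ("comment on exported function ProbeVolumePlugins should be of the form \"ProbeVolumePlugins ...\""). A sketch with a simplified signature:

package fc

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
// The old comment ("This is the primary entrypoint...") failed the
// name-prefix requirement.
func ProbeVolumePlugins() []string {
	return nil
}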
@@ -106,7 +106,7 @@ func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
 
 func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
+    return plugin.newMounterInternal(spec, pod.UID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
 }
 
 func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
@@ -166,7 +166,7 @@ func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ v
     if pod != nil {
         uid = pod.UID
     }
-    return plugin.newBlockVolumeMapperInternal(spec, uid, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
+    return plugin.newBlockVolumeMapperInternal(spec, uid, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
 }
 
 func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) {
@@ -198,7 +198,7 @@ func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID t
 
 func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
+    return plugin.newUnmounterInternal(volName, podUID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
 }
 
 func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
@@ -216,7 +216,7 @@ func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, m
 }
 
 func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
-    return plugin.newUnmapperInternal(volName, podUID, &FCUtil{})
+    return plugin.newUnmapperInternal(volName, podUID, &fcUtil{})
 }
 
 func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) {
@@ -96,7 +96,7 @@ type fakeDiskManager struct {
     detachCalled bool
 }
 
-func NewFakeDiskManager() *fakeDiskManager {
+func newFakeDiskManager() *fakeDiskManager {
     return &fakeDiskManager{
         tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
     }
@@ -161,7 +161,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
     if err != nil {
         t.Errorf("Can't find the plugin by name")
     }
-    fakeManager := NewFakeDiskManager()
+    fakeManager := newFakeDiskManager()
     defer fakeManager.Cleanup()
     fakeMounter := &mount.FakeMounter{}
     fakeExec := mount.NewFakeExec(nil)
@@ -190,7 +190,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
         }
     }
 
-    fakeManager2 := NewFakeDiskManager()
+    fakeManager2 := newFakeDiskManager()
     defer fakeManager2.Cleanup()
     unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
     if err != nil {
@@ -224,7 +224,7 @@ func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
     if err != nil {
         t.Errorf("Can't find the plugin by name")
     }
-    fakeManager := NewFakeDiskManager()
+    fakeManager := newFakeDiskManager()
     defer fakeManager.Cleanup()
     fakeMounter := &mount.FakeMounter{}
     fakeExec := mount.NewFakeExec(nil)
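The test-helper rename answers a related golint warning: an exported constructor that returns an unexported type is flagged ("exported func NewFakeDiskManager returns unexported type *fc.fakeDiskManager, which can be annoying to use"). Since the helper never leaves the test package, unexporting it is the clean fix. A sketch (tmpDir value is illustrative):

package fc

type fakeDiskManager struct {
	tmpDir string
}

// newFakeDiskManager is unexported to match the unexported type it
// returns; the exported spelling drew the golint warning above.
func newFakeDiskManager() *fakeDiskManager {
	return &fakeDiskManager{tmpDir: "/tmp/fc_test"}
}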
@@ -61,13 +61,13 @@ func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.File
 
 // given a wwn and lun, find the device and associated devicemapper parent
 func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
-    fc_path := "-fc-0x" + wwn + "-lun-" + lun
-    dev_path := byPath
-    if dirs, err := io.ReadDir(dev_path); err == nil {
+    fcPath := "-fc-0x" + wwn + "-lun-" + lun
+    devPath := byPath
+    if dirs, err := io.ReadDir(devPath); err == nil {
         for _, f := range dirs {
             name := f.Name()
-            if strings.Contains(name, fc_path) {
-                if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil {
+            if strings.Contains(name, fcPath) {
+                if disk, err1 := io.EvalSymlinks(devPath + name); err1 == nil {
                     dm := deviceUtil.FindMultipathDeviceForDevice(disk)
                     glog.Infof("fc: find disk: %v, dm: %v", disk, dm)
                     return disk, dm
@@ -89,15 +89,15 @@ func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil)
     // The wwid could contain white space and it will be replaced
     // underscore when wwid is exposed under /dev/by-id.
 
-    fc_path := "scsi-" + wwid
-    dev_id := byID
-    if dirs, err := io.ReadDir(dev_id); err == nil {
+    fcPath := "scsi-" + wwid
+    devID := byID
+    if dirs, err := io.ReadDir(devID); err == nil {
         for _, f := range dirs {
             name := f.Name()
-            if name == fc_path {
-                disk, err := io.EvalSymlinks(dev_id + name)
+            if name == fcPath {
+                disk, err := io.EvalSymlinks(devID + name)
                 if err != nil {
-                    glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", dev_id+name, err)
+                    glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err)
                     return "", ""
                 }
                 dm := deviceUtil.FindMultipathDeviceForDevice(disk)
@@ -106,7 +106,7 @@ func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil)
             }
         }
     }
-    glog.V(2).Infof("fc: failed to find a disk [%s]", dev_id+fc_path)
+    glog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath)
     return "", ""
 }
@@ -120,10 +120,10 @@ func removeFromScsiSubsystem(deviceName string, io ioHandler) {
 
 // rescan scsi bus
 func scsiHostRescan(io ioHandler) {
-    scsi_path := "/sys/class/scsi_host/"
-    if dirs, err := io.ReadDir(scsi_path); err == nil {
+    scsiPath := "/sys/class/scsi_host/"
+    if dirs, err := io.ReadDir(scsiPath); err == nil {
         for _, f := range dirs {
-            name := scsi_path + f.Name() + "/scan"
+            name := scsiPath + f.Name() + "/scan"
             data := []byte("- - -")
             io.WriteFile(name, data, 0666)
         }
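The fc_util.go renames apply two naming conventions at once: golint rejects snake_case locals ("don't use underscores in Go names; var fc_path should be fcPath"), and recognized initialisms such as ID are capitalized as a unit, which is why dev_id becomes devID rather than devId (and diskIds becomes diskIDs below). A compact illustration — the WWN literal is made up:

package fc

func namingExamples() (string, string) {
	// was fc_path — golint: "don't use underscores in Go names"
	fcPath := "-fc-0x5005076802232ade-lun-0"
	// was dev_id — initialisms stay fully capitalized: devID, not devId
	devID := "/dev/disk/by-id/"
	return fcPath, devID
}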
@@ -134,33 +134,31 @@ func scsiHostRescan(io ioHandler) {
 func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
     if len(wwns) != 0 {
         return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
-    } else {
-        return path.Join(host.GetPluginDir(fcPluginName), wwids[0])
     }
+    return path.Join(host.GetPluginDir(fcPluginName), wwids[0])
 }
 
 // make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0
 func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
     if len(wwns) != 0 {
         return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
-    } else {
-        return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
     }
+    return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
 }
 
-type FCUtil struct{}
+type fcUtil struct{}
 
-func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
+func (util *fcUtil) MakeGlobalPDName(fc fcDisk) string {
     return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
 }
 
 // Global volume device plugin dir
-func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string {
+func (util *fcUtil) MakeGlobalVDPDName(fc fcDisk) string {
     return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
 }
 
 func searchDisk(b fcDiskMounter) (string, error) {
-    var diskIds []string
+    var diskIDs []string
     var disk string
     var dm string
     io := b.io
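Beyond the FCUtil rename, makePDNameInternal and makeVDPDNameInternal drop a redundant else: golint flags an else whose if branch ends in return ("if block ends with a return statement, so drop this else and outdent its block"). The resulting shape, simplified:

package fc

import "path"

// Simplified from makePDNameInternal: once the if branch returns,
// the else wrapper is dropped and its body outdented.
func pdName(pluginDir string, wwns []string, lun string, wwids []string) string {
	if len(wwns) != 0 {
		return path.Join(pluginDir, wwns[0]+"-lun-"+lun)
	}
	return path.Join(pluginDir, wwids[0])
}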
@@ -169,9 +167,9 @@ func searchDisk(b fcDiskMounter) (string, error) {
     lun := b.lun
 
     if len(wwns) != 0 {
-        diskIds = wwns
+        diskIDs = wwns
     } else {
-        diskIds = wwids
+        diskIDs = wwids
     }
 
     rescaned := false
@@ -179,11 +177,11 @@ func searchDisk(b fcDiskMounter) (string, error) {
     // first phase, search existing device path, if a multipath dm is found, exit loop
     // otherwise, in second phase, rescan scsi bus and search again, return with any findings
     for true {
-        for _, diskId := range diskIds {
+        for _, diskID := range diskIDs {
             if len(wwns) != 0 {
-                disk, dm = findDisk(diskId, lun, io, b.deviceUtil)
+                disk, dm = findDisk(diskID, lun, io, b.deviceUtil)
             } else {
-                disk, dm = findDiskWWIDs(diskId, io, b.deviceUtil)
+                disk, dm = findDiskWWIDs(diskID, io, b.deviceUtil)
             }
             // if multipath device is found, break
             if dm != "" {
@@ -211,7 +209,7 @@ func searchDisk(b fcDiskMounter) (string, error) {
     return disk, nil
 }
 
-func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
+func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) {
     devicePath, err := searchDisk(b)
     if err != nil {
         return "", err
@@ -250,7 +248,7 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
 }
 
 // DetachDisk removes scsi device file such as /dev/sdX from the node.
-func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
+func (util *fcUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
     var devices []string
     // devicePath might be like /dev/mapper/mpathX. Find destination.
     dstPath, err := c.io.EvalSymlinks(devicePath)
@@ -281,7 +279,7 @@ func (util *FCUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
 }
 
 // detachFCDisk removes scsi device file such as /dev/sdX from the node.
-func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error {
+func (util *fcUtil) detachFCDisk(io ioHandler, devicePath string) error {
     // Remove scsi device from the node.
     if !strings.HasPrefix(devicePath, "/dev/") {
         return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
@@ -294,7 +292,7 @@ func (util *FCUtil) detachFCDisk(io ioHandler, devicePath string) error {
 
 // DetachBlockFCDisk detaches a volume from kubelet node, removes scsi device file
 // such as /dev/sdX from the node, and then removes loopback for the scsi device.
-func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
+func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
     // Check if devicePath is valid
     if len(devicePath) != 0 {
         if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil {
@@ -34,7 +34,7 @@ import (
     "k8s.io/kubernetes/pkg/volume/util"
 )
 
-// This is the primary entrypoint for volume plugins.
+// ProbeVolumePlugins is the primary entrypoint for volume plugins.
 func ProbeVolumePlugins() []volume.VolumePlugin {
     return []volume.VolumePlugin{&flockerPlugin{nil}}
 }
@@ -116,11 +116,11 @@ func (p *flockerPlugin) SupportsMountOption() bool {
     return false
 }
 
-func (plugin *flockerPlugin) SupportsBulkVolumeVerification() bool {
+func (p *flockerPlugin) SupportsBulkVolumeVerification() bool {
     return false
 }
 
-func (plugin *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
+func (p *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
     return []v1.PersistentVolumeAccessMode{
         v1.ReadWriteOnce,
     }
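The flocker.go changes are mostly receiver renames: golint wants every method on a type to use the same receiver name, and flockerPlugin already used p elsewhere ("receiver name plugin should be consistent with previous receiver name p for flockerPlugin"). A sketch, not the actual plugin code:

package flocker

type flockerPlugin struct{}

func (p *flockerPlugin) SupportsMountOption() bool { return false }

// A sibling method that named its receiver "plugin" drew the
// consistency warning; after the fix every method uses p.
func (p *flockerPlugin) SupportsBulkVolumeVerification() bool { return false }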
@@ -136,12 +136,12 @@ func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*v1.FlockerVo
     return spec.PersistentVolume.Spec.Flocker, readOnly
 }
 
-func (plugin *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
+func (p *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newMounterInternal(spec, pod.UID, &FlockerUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
+    return p.newMounterInternal(spec, pod.UID, &flockerUtil{}, p.host.GetMounter(p.GetPluginName()))
 }
 
-func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) {
+func (p *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) {
     volumeSource, readOnly, err := getVolumeSource(spec)
     if err != nil {
         return nil, err
@@ -158,15 +158,15 @@ func (plugin *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.
             datasetUUID: datasetUUID,
             mounter:     mounter,
             manager:     manager,
-            plugin:      plugin,
-            MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
+            plugin:      p,
+            MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), p.host)),
         },
         readOnly: readOnly}, nil
 }
 
 func (p *flockerPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
     // Inject real implementations here, test through the internal function.
-    return p.newUnmounterInternal(volName, podUID, &FlockerUtil{}, p.host.GetMounter(p.GetPluginName()))
+    return p.newUnmounterInternal(volName, podUID, &flockerUtil{}, p.host.GetMounter(p.GetPluginName()))
 }
 
 func (p *flockerPlugin) newUnmounterInternal(volName string, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Unmounter, error) {
@@ -304,7 +304,7 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
         }
         _, err := b.flockerClient.GetDatasetState(datasetUUID)
         if err != nil {
-            return fmt.Errorf("The volume with datasetUUID='%s' migrated unsuccessfully.", datasetUUID)
+            return fmt.Errorf("The volume with datasetUUID='%s' migrated unsuccessfully", datasetUUID)
         }
     }
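The one-character change in SetUpAt is golint's error-string rule: error messages are routinely wrapped into longer ones, so they should not end with punctuation (the message is along the lines of "error strings should not be capitalized or end with punctuation or a newline"). A sketch:

package flocker

import "fmt"

func migrationError(datasetUUID string) error {
	// The trailing period was dropped; wrapped errors would otherwise
	// render awkwardly as "...unsuccessfully.: more context".
	return fmt.Errorf("The volume with datasetUUID='%s' migrated unsuccessfully", datasetUUID)
}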
@@ -433,11 +433,11 @@ func (c *flockerVolumeUnmounter) TearDownAt(dir string) error {
     return util.UnmountPath(dir, c.mounter)
 }
 
-func (plugin *flockerPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
-    return plugin.newDeleterInternal(spec, &FlockerUtil{})
+func (p *flockerPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+    return p.newDeleterInternal(spec, &flockerUtil{})
 }
 
-func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) {
+func (p *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) {
     if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Flocker == nil {
         return nil, fmt.Errorf("spec.PersistentVolumeSource.Flocker is nil")
     }
@@ -450,15 +450,15 @@ func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volum
     }}, nil
 }
 
-func (plugin *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-    return plugin.newProvisionerInternal(options, &FlockerUtil{})
+func (p *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+    return p.newProvisionerInternal(options, &flockerUtil{})
 }
 
-func (plugin *flockerPlugin) newProvisionerInternal(options volume.VolumeOptions, manager volumeManager) (volume.Provisioner, error) {
+func (p *flockerPlugin) newProvisionerInternal(options volume.VolumeOptions, manager volumeManager) (volume.Provisioner, error) {
     return &flockerVolumeProvisioner{
         flockerVolume: &flockerVolume{
             manager: manager,
-            plugin:  plugin,
+            plugin:  p,
         },
         options: options,
     }, nil
@@ -29,9 +29,9 @@ import (
     "github.com/golang/glog"
 )
 
-type FlockerUtil struct{}
+type flockerUtil struct{}
 
-func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
+func (util *flockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
     var err error
 
     if d.flockerClient == nil {
@@ -49,7 +49,7 @@ func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
     return d.flockerClient.DeleteDataset(datasetUUID)
 }
 
-func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) {
+func (util *flockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) {
 
     if c.flockerClient == nil {
         c.flockerClient, err = c.plugin.newFlockerClient("")
@@ -44,7 +44,7 @@ func TestFlockerUtil_CreateVolume(t *testing.T) {
     defer os.RemoveAll(dir)
     provisioner.flockerClient = fakeFlockerClient
 
-    flockerUtil := &FlockerUtil{}
+    flockerUtil := &flockerUtil{}
 
     datasetID, size, _, err := flockerUtil.CreateVolume(provisioner)
     assert.NoError(err)
@@ -43,8 +43,8 @@ func (b *flockerVolumeDeleter) GetPath() string {
     return getPath(b.podUID, b.volName, b.plugin.host)
 }
 
-func (d *flockerVolumeDeleter) Delete() error {
-    return d.manager.DeleteVolume(d)
+func (b *flockerVolumeDeleter) Delete() error {
+    return b.manager.DeleteVolume(b)
 }
 
 type flockerVolumeProvisioner struct {