mirror of https://github.com/k3s-io/k3s
parent 6f37347fce
commit 9448aa66ff
@@ -1222,17 +1222,11 @@ func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) {
 
 // Recycler interfaces
 
-func (plugin *mockVolumePlugin) NewRecycler(pvName string, spec *vol.Spec, eventRecorder vol.RecycleEventRecorder) (vol.Recycler, error) {
-	if len(plugin.recycleCalls) > 0 {
-		// mockVolumePlugin directly implements Recycler interface
-		glog.V(4).Infof("mock plugin NewRecycler called, returning mock recycler")
-		return plugin, nil
-	} else {
-		return nil, fmt.Errorf("Mock plugin error: no recycleCalls configured")
+func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder vol.RecycleEventRecorder) error {
+	if len(plugin.recycleCalls) == 0 {
+		return fmt.Errorf("Mock plugin error: no recycleCalls configured")
 	}
-}
 
-func (plugin *mockVolumePlugin) Recycle() error {
 	if len(plugin.recycleCalls) <= plugin.recycleCallCounter {
 		return fmt.Errorf("Mock plugin error: unexpected recycle call %d", plugin.recycleCallCounter)
 	}
@@ -1017,23 +1017,10 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
 
 	// Plugin found
 	recorder := ctrl.newRecyclerEventRecorder(volume)
-	recycler, err := plugin.NewRecycler(volume.Name, spec, recorder)
-	if err != nil {
-		// Cannot create recycler
-		strerr := fmt.Sprintf("Failed to create recycler: %v", err)
-		if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, "VolumeFailedRecycle", strerr); err != nil {
-			glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
-			// Save failed, retry on the next deletion attempt
-			return
-		}
-		// Despite the volume being Failed, the controller will retry recycling
-		// the volume in every syncVolume() call.
-		return
-	}
-
-	if err = recycler.Recycle(); err != nil {
+	if err = plugin.Recycle(volume.Name, spec, recorder); err != nil {
 		// Recycler failed
-		strerr := fmt.Sprintf("Recycler failed: %s", err)
+		strerr := fmt.Sprintf("Recycle failed: %s", err)
 		if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, "VolumeFailedRecycle", strerr); err != nil {
 			glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
 			// Save failed, retry on the next deletion attempt
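
The hunk above collapses two failure paths (recycler construction and recycling itself) into a single Recycle call. A minimal, self-contained sketch of that call-site shape, using stand-in types rather than the real controller and plugin structs:

package main

import (
	"errors"
	"fmt"
)

// volumePlugin and failingPlugin are illustrative stand-ins, not the real
// Kubernetes types.
type volumePlugin interface {
	Recycle(pvName string) error
}

type failingPlugin struct{}

func (failingPlugin) Recycle(pvName string) error {
	return errors.New("no recycleCalls configured")
}

// recycleVolumeOperation mirrors the shape of the new call site: one blocking
// Recycle call; on error the volume would be marked Failed and an event-style
// message recorded, with the next sync expected to retry.
func recycleVolumeOperation(plugin volumePlugin, pvName string, recordEvent func(string)) {
	if err := plugin.Recycle(pvName); err != nil {
		recordEvent(fmt.Sprintf("Recycle failed: %s", err))
		// Despite the failure, a real controller retries recycling on every sync.
		return
	}
	recordEvent("volume recycled")
}

func main() {
	recycleVolumeOperation(failingPlugin{}, "volume6-4", func(msg string) {
		fmt.Println("Warning VolumeFailedRecycle:", msg)
	})
}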
@@ -64,10 +64,10 @@ func TestRecycleSync(t *testing.T) {
 			[]string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume,
 		},
 		{
-			// recycle failure - newRecycler returns error
+			// recycle failure - Recycle returns error
 			"6-4 - newRecycler returns error",
 			newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
-			withMessage("Failed to create recycler: Mock plugin error: no recycleCalls configured", newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
+			withMessage("Recycle failed: Mock plugin error: no recycleCalls configured", newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
			noclaims,
			noclaims,
			[]string{"Warning VolumeFailedRecycle"}, noerrors,
@@ -77,7 +77,7 @@ func TestRecycleSync(t *testing.T) {
 			// recycle failure - recycle returns error
 			"6-5 - recycle returns error",
 			newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle),
-			withMessage("Recycler failed: Mock recycle error", newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
+			withMessage("Recycle failed: Mock recycle error", newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle)),
 			noclaims,
 			noclaims,
 			[]string{"Warning VolumeFailedRecycle"}, noerrors,
@@ -16,7 +16,6 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
-        "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/volume:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
@@ -24,14 +24,13 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/volume"
 )
 
 // This is the primary entrypoint for volume plugins.
 // The volumeConfig arg provides the ability to configure volume behavior. It is implemented as a pointer to allow nils.
-// The hostPathPlugin is used to store the volumeConfig and give it, when needed, to the func that creates HostPath Recyclers.
+// The hostPathPlugin is used to store the volumeConfig and give it, when needed, to the func that Recycles.
 // Tests that exercise recycling should not use this func but instead use ProbeRecyclablePlugins() to override default behavior.
 func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
 	return []volume.VolumePlugin{
@@ -107,8 +106,24 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (vo
 	}}, nil
 }
 
-func (plugin *hostPathPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
-	return newRecycler(pvName, spec, eventRecorder, plugin.host, plugin.config)
+// Recycle recycles/scrubs clean a HostPath volume.
+// Recycle blocks until the pod has completed or any error occurs.
+// HostPath recycling only works in single node clusters and is meant for testing purposes only.
+func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error {
+	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
+		return fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
+	}
+
+	pod := plugin.config.RecyclerPodTemplate
+	timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
+	// overrides
+	pod.Spec.ActiveDeadlineSeconds = &timeout
+	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
+		HostPath: &v1.HostPathVolumeSource{
+			Path: spec.PersistentVolume.Spec.HostPath.Path,
+		},
+	}
+	return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
 }
 
 func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
@@ -134,22 +149,6 @@ func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string)
 	return volume.NewSpecFromVolume(hostPathVolume), nil
 }
 
-func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
-	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
-		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
-	}
-	path := spec.PersistentVolume.Spec.HostPath.Path
-	return &hostPathRecycler{
-		name:          spec.Name(),
-		path:          path,
-		host:          host,
-		config:        config,
-		timeout:       volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
-		pvName:        pvName,
-		eventRecorder: eventRecorder,
-	}, nil
-}
-
 func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
 	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.HostPath == nil {
 		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
@@ -225,42 +224,6 @@ func (c *hostPathUnmounter) TearDownAt(dir string) error {
 	return fmt.Errorf("TearDownAt() does not make sense for host paths")
 }
 
-// hostPathRecycler implements a Recycler for the HostPath plugin
-// This implementation is meant for testing only and only works in a single node cluster
-type hostPathRecycler struct {
-	name    string
-	path    string
-	host    volume.VolumeHost
-	config  volume.VolumeConfig
-	timeout int64
-	volume.MetricsNil
-	pvName        string
-	eventRecorder volume.RecycleEventRecorder
-}
-
-func (r *hostPathRecycler) GetPath() string {
-	return r.path
-}
-
-// Recycle recycles/scrubs clean a HostPath volume.
-// Recycle blocks until the pod has completed or any error occurs.
-// HostPath recycling only works in single node clusters and is meant for testing purposes only.
-func (r *hostPathRecycler) Recycle() error {
-	templateClone, err := api.Scheme.DeepCopy(r.config.RecyclerPodTemplate)
-	if err != nil {
-		return err
-	}
-	pod := templateClone.(*v1.Pod)
-	// overrides
-	pod.Spec.ActiveDeadlineSeconds = &r.timeout
-	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
-		HostPath: &v1.HostPathVolumeSource{
-			Path: r.path,
-		},
-	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
-}
-
 // hostPathProvisioner implements a Provisioner for the HostPath plugin
 // This implementation is meant for testing only and only works in a single node cluster.
 type hostPathProvisioner struct {
@@ -75,17 +75,10 @@ func TestRecycler(t *testing.T) {
 	plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.VolumeConfig{}}}, pluginHost)
 
 	spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}}}}}
-	plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
+	_, err := plugMgr.FindRecyclablePluginBySpec(spec)
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	recycler, err := plug.NewRecycler("pv-name", spec, nil)
-	if err != nil {
-		t.Errorf("Failed to make a new Recycler: %v", err)
-	}
-	if recycler.GetPath() != spec.PersistentVolume.Spec.HostPath.Path {
-		t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.HostPath.Path, recycler.GetPath())
-	}
 }
 
 func TestDeleter(t *testing.T) {
@@ -16,7 +16,6 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
-        "//pkg/api:go_default_library",
         "//pkg/api/v1:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//pkg/util/mount:go_default_library",
@@ -24,7 +24,6 @@ import (
 	"github.com/golang/glog"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -133,8 +132,25 @@ func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID,
 	}}, nil
 }
 
-func (plugin *nfsPlugin) NewRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) (volume.Recycler, error) {
-	return newRecycler(pvName, spec, eventRecorder, plugin.host, plugin.config)
+// Recycle recycles/scrubs clean an NFS volume.
+// Recycle blocks until the pod has completed or any error occurs.
+func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error {
+	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil {
+		return fmt.Errorf("spec.PersistentVolumeSource.NFS is nil")
+	}
+
+	pod := plugin.config.RecyclerPodTemplate
+	timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume)
+	// overrides
+	pod.Spec.ActiveDeadlineSeconds = &timeout
+	pod.GenerateName = "pv-recycler-nfs-"
+	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
+		NFS: &v1.NFSVolumeSource{
+			Server: spec.PersistentVolume.Spec.NFS.Server,
+			Path:   spec.PersistentVolume.Spec.NFS.Path,
+		},
+	}
+	return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
 }
 
 func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
@@ -255,12 +271,6 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error {
 	return nil
 }
 
-//
-//func (c *nfsUnmounter) GetPath() string {
-//	name := nfsPluginName
-//	return c.plugin.host.GetPodVolumeDir(c.pod.UID, strings.EscapeQualifiedNameForDisk(name), c.volName)
-//}
-
 var _ volume.Unmounter = &nfsUnmounter{}
 
 type nfsUnmounter struct {
@@ -275,59 +285,6 @@ func (c *nfsUnmounter) TearDownAt(dir string) error {
 	return util.UnmountPath(dir, c.mounter)
 }
 
-func newRecycler(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error) {
-	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil {
-		return nil, fmt.Errorf("spec.PersistentVolumeSource.NFS is nil")
-	}
-	return &nfsRecycler{
-		name:          spec.Name(),
-		server:        spec.PersistentVolume.Spec.NFS.Server,
-		path:          spec.PersistentVolume.Spec.NFS.Path,
-		host:          host,
-		config:        volumeConfig,
-		timeout:       volume.CalculateTimeoutForVolume(volumeConfig.RecyclerMinimumTimeout, volumeConfig.RecyclerTimeoutIncrement, spec.PersistentVolume),
-		pvName:        pvName,
-		eventRecorder: eventRecorder,
-	}, nil
-}
-
-// nfsRecycler scrubs an NFS volume by running "rm -rf" on the volume in a pod.
-type nfsRecycler struct {
-	name    string
-	server  string
-	path    string
-	host    volume.VolumeHost
-	config  volume.VolumeConfig
-	timeout int64
-	volume.MetricsNil
-	pvName        string
-	eventRecorder volume.RecycleEventRecorder
-}
-
-func (r *nfsRecycler) GetPath() string {
-	return r.path
-}
-
-// Recycle recycles/scrubs clean an NFS volume.
-// Recycle blocks until the pod has completed or any error occurs.
-func (r *nfsRecycler) Recycle() error {
-	templateClone, err := api.Scheme.DeepCopy(r.config.RecyclerPodTemplate)
-	if err != nil {
-		return err
-	}
-	pod := templateClone.(*v1.Pod)
-	// overrides
-	pod.Spec.ActiveDeadlineSeconds = &r.timeout
-	pod.GenerateName = "pv-recycler-nfs-"
-	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
-		NFS: &v1.NFSVolumeSource{
-			Server: r.server,
-			Path:   r.path,
-		},
-	}
-	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
-}
-
 func getVolumeSource(spec *volume.Spec) (*v1.NFSVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.NFS != nil {
 		return spec.Volume.NFS, spec.Volume.NFS.ReadOnly, nil
@@ -89,32 +89,10 @@ func TestRecycler(t *testing.T) {
 	plugMgr.InitPlugins([]volume.VolumePlugin{&nfsPlugin{nil, volume.VolumeConfig{}}}, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
 
 	spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{NFS: &v1.NFSVolumeSource{Path: "/foo"}}}}}
-	plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
-	if err != nil {
+	_, plugin_err := plugMgr.FindRecyclablePluginBySpec(spec)
+	if plugin_err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	recycler, err := plug.NewRecycler("pv-name", spec, nil)
-	if err != nil {
-		t.Errorf("Failed to make a new Recycler: %v", err)
-	}
-	if recycler.GetPath() != spec.PersistentVolume.Spec.NFS.Path {
-		t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.NFS.Path, recycler.GetPath())
-	}
 }
 
-type mockRecycler struct {
-	path string
-	host volume.VolumeHost
-	volume.MetricsNil
-}
-
-func (r *mockRecycler) GetPath() string {
-	return r.path
-}
-
-func (r *mockRecycler) Recycle() error {
-	// return nil means recycle passed
-	return nil
-}
-
 func contains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
@@ -122,12 +122,13 @@ type PersistentVolumePlugin interface {
 // again to new claims
 type RecyclableVolumePlugin interface {
 	VolumePlugin
-	// NewRecycler creates a new volume.Recycler which knows how to reclaim this
-	// resource after the volume's release from a PersistentVolumeClaim. The
-	// recycler will use the provided recorder to write any events that might be
+
+	// Recycle knows how to reclaim this
+	// resource after the volume's release from a PersistentVolumeClaim.
+	// Recycle will use the provided recorder to write any events that might be
 	// interesting to user. It's expected that caller will pass these events to
 	// the PV being recycled.
-	NewRecycler(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) (Recycler, error)
+	Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error
 }
 
 // DeletableVolumePlugin is an extended interface of VolumePlugin and is used
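
A minimal, self-contained sketch of what the reworked contract means for a plugin author: the plugin recycles the volume itself in one blocking call instead of handing back a Recycler object. All identifiers below (demoPlugin, volumeSpec, the local interface) are stand-ins for illustration, not the real k8s.io/kubernetes/pkg/volume types.

package main

import (
	"errors"
	"fmt"
)

// volumeSpec and recycleEventRecorder stand in for volume.Spec and
// volume.RecycleEventRecorder.
type volumeSpec struct {
	HostPathDir string
}

type recycleEventRecorder func(message string)

// recyclableVolumePlugin mirrors the shape of the new interface: one blocking
// Recycle call per volume, no intermediate Recycler object to construct.
type recyclableVolumePlugin interface {
	Recycle(pvName string, spec *volumeSpec, recorder recycleEventRecorder) error
}

type demoPlugin struct{}

// Recycle validates the spec, emits progress events through the recorder, and
// blocks until the (simulated) scrub finishes, returning nil on success.
func (demoPlugin) Recycle(pvName string, spec *volumeSpec, recorder recycleEventRecorder) error {
	if spec == nil || spec.HostPathDir == "" {
		return errors.New("spec.HostPathDir is empty")
	}
	recorder(fmt.Sprintf("recycling %s at %s", pvName, spec.HostPathDir))
	// A real plugin would launch a recycler pod here and watch it to completion.
	return nil
}

func main() {
	var plugin recyclableVolumePlugin = demoPlugin{}
	err := plugin.Recycle("pv-demo", &volumeSpec{HostPathDir: "/tmp/pv-demo"}, func(msg string) {
		fmt.Println("event:", msg)
	})
	fmt.Println("recycle error:", err)
}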
@@ -275,8 +275,8 @@ func (plugin *FakeVolumePlugin) GetNewDetacherCallCount() int {
 	return plugin.NewDetacherCallCount
 }
 
-func (plugin *FakeVolumePlugin) NewRecycler(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) (Recycler, error) {
-	return &fakeRecycler{"/attributesTransferredFromSpec", MetricsNil{}}, nil
+func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error {
+	return nil
 }
 
 func (plugin *FakeVolumePlugin) NewDeleter(spec *Spec) (Deleter, error) {
@@ -447,20 +447,6 @@ func (fv *FakeVolume) UnmountDevice(globalMountPath string) error {
 	return nil
 }
 
-type fakeRecycler struct {
-	path string
-	MetricsNil
-}
-
-func (fr *fakeRecycler) Recycle() error {
-	// nil is success, else error
-	return nil
-}
-
-func (fr *fakeRecycler) GetPath() string {
-	return fr.path
-}
-
 type FakeDeleter struct {
 	path string
 	MetricsNil
@@ -134,15 +134,6 @@ type Unmounter interface {
 	TearDownAt(dir string) error
 }
 
-// Recycler provides methods to reclaim the volume resource.
-type Recycler interface {
-	Volume
-	// Recycle reclaims the resource. Calls to this method should block until
-	// the recycling task is complete. Any error returned indicates the volume
-	// has failed to be reclaimed. A nil return indicates success.
-	Recycle() error
-}
-
 // Provisioner is an interface that creates templates for PersistentVolumes
 // and can create the volume as a new resource in the infrastructure provider.
 type Provisioner interface {