mirror of https://github.com/k3s-io/k3s
Dynamic provisioning V2 controller, provisioners, docs and tests.
parent 214c916045
commit 6e4d95f646

@@ -419,19 +419,16 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
glog.Infof("Not starting %s apis", groupVersion)
}

provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatalf("A Provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
}

volumeController := persistentvolumecontroller.NewPersistentVolumeController(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod.Duration,
provisioner,
ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
cloud,
s.ClusterName,
nil, nil, nil,
nil, // volumeSource
nil, // claimSource
nil, // classSource
nil, // eventRecorder
s.VolumeConfiguration.EnableDynamicProvisioning,
)
volumeController.Run()

@@ -62,8 +62,10 @@ func ProbeAttachableVolumePlugins(config componentconfig.VolumeConfiguration) []
return allPlugins
}

// ProbeRecyclableVolumePlugins collects all persistent volume plugins into an easy to use list.
func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
// ProbeControllerVolumePlugins collects all persistent volume plugins into an
// easy to use list. Only volume plugins that implement any of
// provisioner/recycler/deleter interface should be returned.
func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) []volume.VolumePlugin {
allPlugins := []volume.VolumePlugin{}

// The list of plugins to probe is decided by this binary, not

@@ -79,6 +81,7 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []
RecyclerMinimumTimeout: int(config.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath),
RecyclerTimeoutIncrement: int(config.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath),
RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(),
ProvisioningEnabled: config.EnableHostPathProvisioning,
}
if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil {
glog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err)

@@ -95,34 +98,22 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []
}
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)

allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
if cloud != nil {
switch {
case aws.ProviderName == cloud.ProviderName():
allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
case gce.ProviderName == cloud.ProviderName():
allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
case openstack.ProviderName == cloud.ProviderName():
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
case vsphere.ProviderName == cloud.ProviderName():
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
}
}

return allPlugins
}

// NewVolumeProvisioner returns a volume provisioner to use when running in a cloud or development environment.
// The beta implementation of provisioning allows 1 implied provisioner per cloud, until we allow configuration of many.
// We explicitly map clouds to volume plugins here which allows us to configure many later without backwards compatibility issues.
// Not all cloudproviders have provisioning capability, which is the reason for the bool in the return to tell the caller to expect one or not.
func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.VolumeConfiguration) (volume.ProvisionableVolumePlugin, error) {
switch {
case cloud == nil && config.EnableHostPathProvisioning:
return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}))
case cloud != nil && aws.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())
case cloud != nil && gce.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
case cloud != nil && openstack.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
case cloud != nil && vsphere.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins())
}
return nil, nil
}

func getProvisionablePluginFromVolumePlugins(plugins []volume.VolumePlugin) (volume.ProvisionableVolumePlugin, error) {
for _, plugin := range plugins {
if provisonablePlugin, ok := plugin.(volume.ProvisionableVolumePlugin); ok {

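For context on how the probed plugin list is consumed: a minimal sketch of the wiring, mirroring the volumePluginMgr.InitPlugins call that NewPersistentVolumeController makes further down in this commit. The host argument is assumed to be whatever implements volume.VolumeHost (in the controller it is the PersistentVolumeController itself); the helper function name is invented for illustration.

// Sketch only: probe the controller-side plugins and hand them to a plugin
// manager. With a cloud provider configured, only that cloud's block-storage
// plugin ends up in the probed list.
func initControllerVolumePlugins(cloud cloudprovider.Interface, cfg componentconfig.VolumeConfiguration, host volume.VolumeHost) (*volume.VolumePluginMgr, error) {
	mgr := &volume.VolumePluginMgr{}
	if err := mgr.InitPlugins(ProbeControllerVolumePlugins(cloud, cfg), host); err != nil {
		return nil, err
	}
	return mgr, nil
}
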
@@ -285,21 +285,16 @@ func (s *CMServer) Run(_ []string) error {
}
}

provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatalf("A Provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
}

volumeController := persistentvolumecontroller.NewPersistentVolumeController(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod.Duration,
provisioner,
kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
kubecontrollermanager.ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
cloud,
s.ClusterName,
nil,
nil,
nil,
nil, // volumeSource
nil, // claimSource
nil, // classSource
nil, // eventRecorder
s.VolumeConfiguration.EnableDynamicProvisioning,
)
volumeController.Run()

@@ -20,6 +20,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
)

// Test single call to syncClaim and syncVolume methods.

@@ -422,7 +423,7 @@ func TestSync(t *testing.T) {
noevents, noerrors, testSyncVolume,
},
}
runSyncTests(t, tests)
runSyncTests(t, tests, []*extensions.StorageClass{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all

@@ -469,5 +470,5 @@ func TestMultiSync(t *testing.T) {
},
}

runMultisyncTests(t, tests)
runMultisyncTests(t, tests, []*extensions.StorageClass{}, "")
}

@@ -22,6 +22,7 @@ import (
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"

@@ -108,10 +109,19 @@ const annBindCompleted = "pv.kubernetes.io/bind-completed"
// pre-bound). Value of this annotation does not matter.
const annBoundByController = "pv.kubernetes.io/bound-by-controller"

// annClass annotation represents a new field which instructs dynamic
// provisioning to choose a particular storage class (aka profile).
// Value of this annotation should be empty.
const annClass = "volume.alpha.kubernetes.io/storage-class"
// annClass annotation represents the storage class associated with a resource:
// - in a PersistentVolumeClaim it represents the required class to match.
// Only PersistentVolumes with the same class (i.e. annotation with the same
// value) can be bound to the claim. In case no such volume exists, the
// controller will provision a new one using the StorageClass instance with
// the same name as the annotation value.
// - in a PersistentVolume it represents the storage class to which the persistent
// volume belongs.
const annClass = "volume.beta.kubernetes.io/storage-class"

// alphaAnnClass annotation represents the previous alpha storage class
// annotation. It is no longer used and is kept here for posterity.
const alphaAnnClass = "volume.alpha.kubernetes.io/storage-class"

// This annotation is added to a PV that has been dynamically provisioned by
// Kubernetes. Its value is the name of the volume plugin that created the volume.

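To make the new annotation concrete, here is a short illustrative sketch (not part of the commit) pairing a claim that requests class "gold" with the StorageClass the controller would look it up against. It assumes the internal api, extensions and resource packages used elsewhere in this diff; the function name, provisioner name and parameters are invented.

// Illustrative only: a PVC carrying the beta storage-class annotation and the
// StorageClass object whose name matches the annotation value.
func exampleGoldClaimAndClass() (*api.PersistentVolumeClaim, *extensions.StorageClass) {
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:        "claim-gold",
			Namespace:   "myns",
			Annotations: map[string]string{annClass: "gold"},
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}
	class := &extensions.StorageClass{
		ObjectMeta:  api.ObjectMeta{Name: "gold"},
		Provisioner: "kubernetes.io/mock-provisioner",  // hypothetical plugin name
		Parameters:  map[string]string{"type": "fast"}, // hypothetical parameters
	}
	return claim, class
}
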
@@ -148,13 +158,16 @@ type PersistentVolumeController struct {
claimController *framework.Controller
claimControllerStopCh chan struct{}
claimSource cache.ListerWatcher
classReflector *cache.Reflector
classReflectorStopCh chan struct{}
classSource cache.ListerWatcher
kubeClient clientset.Interface
eventRecorder record.EventRecorder
cloud cloudprovider.Interface
recyclePluginMgr vol.VolumePluginMgr
provisioner vol.ProvisionableVolumePlugin
volumePluginMgr vol.VolumePluginMgr
enableDynamicProvisioning bool
clusterName string
defaultStorageClass string

// Cache of the last known version of volumes and claims. This cache is
// thread safe as long as the volumes/claims there are not modified, they

@@ -163,6 +176,7 @@ type PersistentVolumeController struct {
// it saves newer version to etcd.
volumes persistentVolumeOrderedIndex
claims cache.Store
classes cache.Store

// Map of scheduled/running operations.
runningOperations goroutinemap.GoRoutineMap

@@ -208,7 +222,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *api.PersistentVo
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim))
// No PV could be found
// OBSERVATION: pvc is "Pending", will retry
if hasAnnotation(claim.ObjectMeta, annClass) {
if getClaimClass(claim) != "" {
if err = ctrl.provisionClaim(claim); err != nil {
return err
}

@@ -965,7 +979,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})

// Find a plugin.
spec := vol.NewSpecFromPersistentVolume(volume, false)
plugin, err := ctrl.recyclePluginMgr.FindRecyclablePluginBySpec(spec)
plugin, err := ctrl.volumePluginMgr.FindRecyclablePluginBySpec(spec)
if err != nil {
// No recycler found. Emit an event and mark the volume Failed.
if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedRecycle", "No recycler plugin found for the volume!"); err != nil {

@@ -1119,14 +1133,33 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *api.PersistentV
// (it will be re-used in future provisioner error cases).
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *api.PersistentVolume) error {
glog.V(4).Infof("doDeleteVolume [%s]", volume.Name)
// Find a plugin.
spec := vol.NewSpecFromPersistentVolume(volume, false)
plugin, err := ctrl.recyclePluginMgr.FindDeletablePluginBySpec(spec)
if err != nil {
// No deleter found. Emit an event and mark the volume Failed.
return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err)
var err error

// Find a plugin. Try to find the same plugin that provisioned the volume
var plugin vol.DeletableVolumePlugin
if hasAnnotation(volume.ObjectMeta, annDynamicallyProvisioned) {
provisionPluginName := volume.Annotations[annDynamicallyProvisioned]
if provisionPluginName != "" {
plugin, err = ctrl.volumePluginMgr.FindDeletablePluginByName(provisionPluginName)
if err != nil {
glog.V(3).Infof("did not find a deleter plugin %q for volume %q: %v, will try to find a generic one",
provisionPluginName, volume.Name, err)
}
}
}

spec := vol.NewSpecFromPersistentVolume(volume, false)
if plugin == nil {
// The plugin that provisioned the volume was not found or the volume
// was not dynamically provisioned. Try to find a plugin by spec.
plugin, err = ctrl.volumePluginMgr.FindDeletablePluginBySpec(spec)
if err != nil {
// No deleter found. Emit an event and mark the volume Failed.
return fmt.Errorf("Error getting deleter volume plugin for volume %q: %v", volume.Name, err)
}
}
glog.V(5).Infof("found a deleter plugin %q for volume %q", plugin.GetPluginName(), volume.Name)

// Plugin found
deleter, err := plugin.NewDeleter(spec)
if err != nil {

@@ -1187,12 +1220,10 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
return
}

// TODO: find provisionable plugin based on a class/profile
plugin := ctrl.provisioner
if plugin == nil {
// No provisioner found. Emit an event.
ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", "No provisioner plugin found for the claim!")
glog.V(2).Infof("no provisioner plugin found for claim %s!", claimToClaimKey(claim))
plugin, storageClass, err := ctrl.findProvisionablePlugin(claim)
if err != nil {
ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", err.Error())
glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err)
// The controller will retry provisioning the volume in every
// syncVolume() call.
return

@@ -1212,21 +1243,23 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
ClusterName: ctrl.clusterName,
PVName: pvName,
PVCName: claim.Name,
Parameters: storageClass.Parameters,
Selector: claim.Spec.Selector,
}

// Provision the volume
provisioner, err := plugin.NewProvisioner(options)
if err != nil {
strerr := fmt.Sprintf("Failed to create provisioner: %v", err)
glog.V(2).Infof("failed to create provisioner for claim %q: %v", claimToClaimKey(claim), err)
glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr)
return
}

volume, err = provisioner.Provision()
if err != nil {
strerr := fmt.Sprintf("Failed to provision volume: %v", err)
glog.V(2).Infof("failed to provision volume for claim %q: %v", claimToClaimKey(claim), err)
strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err)
glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, api.EventTypeWarning, "ProvisioningFailed", strerr)
return
}

@@ -1242,6 +1275,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
// Add annBoundByController (used in deleting the volume)
setAnnotation(&volume.ObjectMeta, annBoundByController, "yes")
setAnnotation(&volume.ObjectMeta, annDynamicallyProvisioned, plugin.GetPluginName())
setAnnotation(&volume.ObjectMeta, annClass, getClaimClass(claim))

// Try to create the PV object several times
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {

@@ -1320,3 +1354,40 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string,
}
}
}

func (ctrl *PersistentVolumeController) findProvisionablePlugin(claim *api.PersistentVolumeClaim) (vol.ProvisionableVolumePlugin, *extensions.StorageClass, error) {
storageClass, err := ctrl.findStorageClass(claim)
if err != nil {
return nil, nil, err
}

// Find a plugin for the class
plugin, err := ctrl.volumePluginMgr.FindProvisionablePluginByName(storageClass.Provisioner)
if err != nil {
return nil, nil, err
}
return plugin, storageClass, nil
}

func (ctrl *PersistentVolumeController) findStorageClass(claim *api.PersistentVolumeClaim) (*extensions.StorageClass, error) {
className := getClaimClass(claim)
if className == "" {
className = ctrl.defaultStorageClass
}
if className == "" {
return nil, fmt.Errorf("No default StorageClass configured")
}

classObj, found, err := ctrl.classes.GetByKey(className)
if err != nil {
return nil, err
}
if !found {
return nil, fmt.Errorf("StorageClass %q not found", className)
}
class, ok := classObj.(*extensions.StorageClass)
if !ok {
return nil, fmt.Errorf("Cannot convert object to StorageClass: %+v", classObj)
}
return class, nil
}

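One behavioral note worth spelling out: a claim with no class annotation falls back to the controller-wide default, so provisioning without a per-claim class only works when that default is configured. A small sketch of that path (not part of the commit; the function and class names are invented, and ctrl is assumed to be an initialized controller whose classes cache holds a class named "standard"):

// Illustrative only: resolving a class for a claim that has no annClass annotation.
func resolveClassWithoutAnnotation(ctrl *PersistentVolumeController) (*extensions.StorageClass, error) {
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "claim-no-class", Namespace: "myns"},
		// no annClass annotation, so getClaimClass(claim) returns ""
	}
	ctrl.defaultStorageClass = "standard" // hypothetical controller-wide default
	// findStorageClass falls back to ctrl.defaultStorageClass and looks it up in
	// ctrl.classes; with no default configured at all it returns an error instead.
	return ctrl.findStorageClass(claim)
}
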
@@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"

@@ -47,11 +48,10 @@ import (
func NewPersistentVolumeController(
kubeClient clientset.Interface,
syncPeriod time.Duration,
provisioner vol.ProvisionableVolumePlugin,
recyclers []vol.VolumePlugin,
volumePlugins []vol.VolumePlugin,
cloud cloudprovider.Interface,
clusterName string,
volumeSource, claimSource cache.ListerWatcher,
volumeSource, claimSource, classSource cache.ListerWatcher,
eventRecorder record.EventRecorder,
enableDynamicProvisioning bool,
) *PersistentVolumeController {

@@ -63,25 +63,19 @@ func NewPersistentVolumeController(
}

controller := &PersistentVolumeController{
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: kubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
cloud: cloud,
provisioner: provisioner,
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: kubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
cloud: cloud,
enableDynamicProvisioning: enableDynamicProvisioning,
clusterName: clusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
createProvisionedPVInterval: createProvisionedPVInterval,
}

controller.recyclePluginMgr.InitPlugins(recyclers, controller)
if controller.provisioner != nil {
if err := controller.provisioner.Init(controller); err != nil {
glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err)
}
}
controller.volumePluginMgr.InitPlugins(volumePlugins, controller)

if volumeSource == nil {
volumeSource = &cache.ListWatch{

@@ -107,6 +101,18 @@ func NewPersistentVolumeController(
}
controller.claimSource = claimSource

if classSource == nil {
classSource = &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return kubeClient.Extensions().StorageClasses().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return kubeClient.Extensions().StorageClasses().Watch(options)
},
}
}
controller.classSource = classSource

_, controller.volumeController = framework.NewIndexerInformer(
volumeSource,
&api.PersistentVolume{},

@@ -128,6 +134,16 @@ func NewPersistentVolumeController(
DeleteFunc: controller.deleteClaim,
},
)

// This is just a cache of StorageClass instances, no special actions are
// needed when a class is created/deleted/updated.
controller.classes = cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
controller.classReflector = cache.NewReflector(
classSource,
&extensions.StorageClass{},
controller.classes,
syncPeriod,
)
return controller
}

@@ -433,6 +449,11 @@ func (ctrl *PersistentVolumeController) Run() {
ctrl.claimControllerStopCh = make(chan struct{})
go ctrl.claimController.Run(ctrl.claimControllerStopCh)
}

if ctrl.classReflectorStopCh == nil {
ctrl.classReflectorStopCh = make(chan struct{})
go ctrl.classReflector.RunUntil(ctrl.classReflectorStopCh)
}
}

// Stop gracefully shuts down this controller

@@ -440,6 +461,7 @@ func (ctrl *PersistentVolumeController) Stop() {
glog.V(4).Infof("stopping PersistentVolumeController")
close(ctrl.volumeControllerStopCh)
close(ctrl.claimControllerStopCh)
close(ctrl.classReflectorStopCh)
}

const (

@@ -578,3 +600,25 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
}
return true, nil
}

// getVolumeClass returns value of annClass annotation or empty string in case
// the annotation does not exist.
// TODO: change to PersistentVolume.Spec.Class value when this attribute is
// introduced.
func getVolumeClass(volume *api.PersistentVolume) string {
if class, found := volume.Annotations[annClass]; found {
return class
}
return ""
}

// getClaimClass returns value of annClass annotation or empty string in case
// the annotation does not exist.
// TODO: change to PersistentVolumeClaim.Spec.Class value when this attribute is
// introduced.
func getClaimClass(claim *api.PersistentVolumeClaim) string {
if class, found := claim.Annotations[annClass]; found {
return class
}
return ""
}

@@ -163,7 +163,7 @@ func TestControllerSync(t *testing.T) {
client := &fake.Clientset{}
volumeSource := framework.NewFakePVControllerSource()
claimSource := framework.NewFakePVCControllerSource()
ctrl := newTestController(client, volumeSource, claimSource, true)
ctrl := newTestController(client, volumeSource, claimSource, nil, true)
reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
for _, claim := range test.initialClaims {
claimSource.Add(claim)

@@ -21,6 +21,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
)

// Test single call to syncVolume, expecting recycling to happen.

@@ -39,7 +40,7 @@ func TestDeleteSync(t *testing.T) {
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete volume bound by user

@@ -51,7 +52,7 @@ func TestDeleteSync(t *testing.T) {
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete failure - plugin not found

@@ -70,7 +71,7 @@ func TestDeleteSync(t *testing.T) {
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume),
wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume),
},
{
// delete failure - delete() returns error

@@ -80,7 +81,7 @@ func TestDeleteSync(t *testing.T) {
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume),
wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume),
},
{
// delete success(?) - volume is deleted before doDelete() starts

@@ -90,7 +91,7 @@ func TestDeleteSync(t *testing.T) {
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before delete operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume8-6")

@@ -107,7 +108,7 @@ func TestDeleteSync(t *testing.T) {
noclaims,
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock()
defer reactor.lock.Unlock()
// Bind the volume to resurrected claim (this should never

@@ -130,10 +131,10 @@ func TestDeleteSync(t *testing.T) {
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
}
runSyncTests(t, tests)
runSyncTests(t, tests, []*extensions.StorageClass{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all

@@ -161,9 +162,9 @@ func TestDeleteMultiSync(t *testing.T) {
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
},
}

runMultisyncTests(t, tests)
runMultisyncTests(t, tests, []*extensions.StorageClass{}, "")
}

@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"

@@ -583,7 +584,7 @@ func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController,
return reactor
}

func newTestController(kubeClient clientset.Interface, volumeSource, claimSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController {
func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController {
if volumeSource == nil {
volumeSource = framework.NewFakePVControllerSource()
}

@@ -593,12 +594,12 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource
ctrl := NewPersistentVolumeController(
kubeClient,
5*time.Second, // sync period
nil, // provisioner
[]vol.VolumePlugin{}, // recyclers
nil, // cloud
"",
volumeSource,
claimSource,
classSource,
record.NewFakeRecorder(1000), // event recorder
enableDynamicProvisioning,
)

@@ -608,27 +609,6 @@ func newTestController(kubeClient clientset.Interface, volumeSource, claimSource
return ctrl
}

func addRecyclePlugin(ctrl *PersistentVolumeController, expectedRecycleCalls []error) {
plugin := &mockVolumePlugin{
recycleCalls: expectedRecycleCalls,
}
ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)
}

func addDeletePlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) {
plugin := &mockVolumePlugin{
deleteCalls: expectedDeleteCalls,
}
ctrl.recyclePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)
}

func addProvisionPlugin(ctrl *PersistentVolumeController, expectedDeleteCalls []error) {
plugin := &mockVolumePlugin{
provisionCalls: expectedDeleteCalls,
}
ctrl.provisioner = plugin
}

// newVolume returns a new volume with given attributes
func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) *api.PersistentVolume {
volume := api.PersistentVolume{

@@ -664,10 +644,13 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase a
if len(annotations) > 0 {
volume.Annotations = make(map[string]string)
for _, a := range annotations {
if a != annDynamicallyProvisioned {
volume.Annotations[a] = "yes"
} else {
switch a {
case annDynamicallyProvisioned:
volume.Annotations[a] = mockPluginName
case annClass:
volume.Annotations[a] = "gold"
default:
volume.Annotations[a] = "yes"
}
}
}

@@ -713,6 +696,17 @@ func withMessage(message string, volumes []*api.PersistentVolume) []*api.Persist
return volumes
}

// volumeWithClass saves given class into annClass annotation.
// Meant to be used to compose claims specified inline in a test.
func volumeWithClass(className string, volumes []*api.PersistentVolume) []*api.PersistentVolume {
if volumes[0].Annotations == nil {
volumes[0].Annotations = map[string]string{annClass: className}
} else {
volumes[0].Annotations[annClass] = className
}
return volumes
}

// newVolumeArray returns array with a single volume that would be returned by
// newVolume() with the same parameters.
func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase api.PersistentVolumePhase, reclaimPolicy api.PersistentVolumeReclaimPolicy, annotations ...string) []*api.PersistentVolume {

@@ -749,7 +743,12 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase api.Persiste
if len(annotations) > 0 {
claim.Annotations = make(map[string]string)
for _, a := range annotations {
claim.Annotations[a] = "yes"
switch a {
case annClass:
claim.Annotations[a] = "gold"
default:
claim.Annotations[a] = "yes"
}
}
}

@@ -772,6 +771,17 @@ func newClaimArray(name, claimUID, capacity, boundToVolume string, phase api.Per
}
}

// claimWithClass saves given class into annClass annotation.
// Meant to be used to compose claims specified inline in a test.
func claimWithClass(className string, claims []*api.PersistentVolumeClaim) []*api.PersistentVolumeClaim {
if claims[0].Annotations == nil {
claims[0].Annotations = map[string]string{annClass: className}
} else {
claims[0].Annotations[annClass] = className
}
return claims
}

func testSyncClaim(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
return ctrl.syncClaim(test.initialClaims[0])
}

@@ -793,29 +803,45 @@ type operationType string

const operationDelete = "Delete"
const operationRecycle = "Recycle"
const operationProvision = "Provision"

// wrapTestWithControllerConfig returns a testCall that:
// - configures controller with recycler, deleter or provisioner which will
// return provided errors when a volume is deleted, recycled or provisioned
// wrapTestWithPluginCalls returns a testCall that:
// - configures controller with a volume plugin that implements recycler,
// deleter and provisioner. The plugin returns provided errors when a volume
// is deleted, recycled or provisioned.
// - calls given testCall
func wrapTestWithControllerConfig(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall {
expected := expectedOperationCalls

func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall {
return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
switch operation {
case operationDelete:
addDeletePlugin(ctrl, expected)
case operationRecycle:
addRecyclePlugin(ctrl, expected)
case operationProvision:
addProvisionPlugin(ctrl, expected)
plugin := &mockVolumePlugin{
recycleCalls: expectedRecycleCalls,
deleteCalls: expectedDeleteCalls,
provisionCalls: expectedProvisionCalls,
}
ctrl.volumePluginMgr.InitPlugins([]vol.VolumePlugin{plugin}, ctrl)

return toWrap(ctrl, reactor, test)
}
}

// wrapTestWithReclaimCalls returns a testCall that:
// - configures controller with recycler or deleter which will return provided
// errors when a volume is deleted or recycled
// - calls given testCall
func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall {
if operation == operationDelete {
return wrapTestWithPluginCalls(nil, expectedOperationCalls, nil, toWrap)
} else {
return wrapTestWithPluginCalls(expectedOperationCalls, nil, nil, toWrap)
}
}

// wrapTestWithProvisionCalls returns a testCall that:
// - configures controller with a provisioner which will return provided errors
// when a claim is provisioned
// - calls given testCall
func wrapTestWithProvisionCalls(expectedProvisionCalls []provisionCall, toWrap testCall) testCall {
return wrapTestWithPluginCalls(nil, nil, expectedProvisionCalls, toWrap)
}
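
For readers skimming the wrappers above: wrapTestWithReclaimCalls and wrapTestWithProvisionCalls are thin shims over wrapTestWithPluginCalls, so a provisioning case can be written either way. A small sketch (the helper name and the expected-parameter map are invented examples, not fixtures from this commit):

// Illustrative only: both forms configure the same mockVolumePlugin.
func exampleProvisionWrappers() (testCall, testCall) {
	call := provisionCall{
		expectedParameters: map[string]string{"param1": "value1"}, // hypothetical parameters
		ret:                nil,                                   // the mock Provision() succeeds
	}
	shortForm := wrapTestWithProvisionCalls([]provisionCall{call}, testSyncClaim)
	longForm := wrapTestWithPluginCalls(nil, nil, []provisionCall{call}, testSyncClaim)
	return shortForm, longForm
}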

// wrapTestWithInjectedOperation returns a testCall that:
// - starts the controller and lets it run original testCall until
// scheduleOperation() call. It blocks the controller there and calls the

@@ -873,13 +899,13 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto
// 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest) {
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*extensions.StorageClass) {
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)

// Initialize the controller
client := &fake.Clientset{}
ctrl := newTestController(client, nil, nil, true)
ctrl := newTestController(client, nil, nil, nil, true)
reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
for _, claim := range test.initialClaims {
ctrl.claims.Add(claim)

@@ -890,6 +916,15 @@ func runSyncTests(t *testing.T, tests []controllerTest) {
reactor.volumes[volume.Name] = volume
}

// Convert classes to []interface{} and forcefully inject them into
// controller.
storageClassPtrs := make([]interface{}, len(storageClasses))
for i, s := range storageClasses {
storageClassPtrs[i] = s
}
// 1 is the resource version
ctrl.classes.Replace(storageClassPtrs, "1")

// Run the tested functions
err := test.test(ctrl, reactor, test)
if err != nil {

@@ -920,13 +955,22 @@ func runSyncTests(t *testing.T, tests []controllerTest) {
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls is enforced to prevent endless loops.
func runMultisyncTests(t *testing.T, tests []controllerTest) {
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*extensions.StorageClass, defaultStorageClass string) {
for _, test := range tests {
glog.V(4).Infof("starting multisync test %q", test.name)

// Initialize the controller
client := &fake.Clientset{}
ctrl := newTestController(client, nil, nil, true)
ctrl := newTestController(client, nil, nil, nil, true)

// Convert classes to []interface{} and forcefully inject them into
// controller.
storageClassPtrs := make([]interface{}, len(storageClasses))
for i, s := range storageClasses {
storageClassPtrs[i] = s
}
ctrl.classes.Replace(storageClassPtrs, "1")

reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
for _, claim := range test.initialClaims {
ctrl.claims.Add(claim)

@@ -1022,7 +1066,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest) {
// Dummy volume plugin for provisioning, deletion and recycling. It contains
// lists of expected return values to simulate errors.
type mockVolumePlugin struct {
provisionCalls []error
provisionCalls []provisionCall
provisionCallCounter int
deleteCalls []error
deleteCallCounter int

@@ -1031,6 +1075,11 @@ type mockVolumePlugin struct {
provisionOptions vol.VolumeOptions
}

type provisionCall struct {
expectedParameters map[string]string
ret error
}

var _ vol.VolumePlugin = &mockVolumePlugin{}
var _ vol.RecyclableVolumePlugin = &mockVolumePlugin{}
var _ vol.DeletableVolumePlugin = &mockVolumePlugin{}

@@ -1087,8 +1136,12 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) {
}

var pv *api.PersistentVolume
err := plugin.provisionCalls[plugin.provisionCallCounter]
if err == nil {
call := plugin.provisionCalls[plugin.provisionCallCounter]
if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
}
if call.ret == nil {
// Create a fake PV with known GCE volume (to match expected volume)
pv = &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{

@@ -1108,8 +1161,8 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) {
}

plugin.provisionCallCounter++
glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, err)
return pv, err
glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
return pv, call.ret
}

// Deleter interfaces

@@ -18,12 +18,11 @@ package persistentvolume

import (
"fmt"
"sort"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/labels"
"sort"
)

// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes

@@ -126,11 +125,16 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo
// filter out:
// - volumes bound to another claim
// - volumes whose labels don't match the claim's selector, if specified
// - volumes in a class other than the requested one
if volume.Spec.ClaimRef != nil {
continue
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
continue
}
claimClass := getClaimClass(claim)
if claimClass != "" && claimClass != getVolumeClass(volume) {
continue
}

volumeQty := volume.Spec.Capacity[api.ResourceStorage]
volumeSize := volumeQty.Value()

@@ -142,17 +146,6 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVo
}
}

// We want to provision volumes if the annotation is set even if there
// is matching PV. Therefore, do not look for available PV and let
// a new volume to be provisioned.
//
// When provisioner creates a new PV to this claim, an exact match
// pre-bound to the claim will be found by the checks above during
// subsequent claim sync.
if hasAnnotation(claim.ObjectMeta, annClass) {
return nil, nil
}

if smallestVolume != nil {
// Found a matching volume
return smallestVolume, nil

@@ -164,7 +164,52 @@ func TestMatchVolume(t *testing.T) {
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"),
api.ResourceName(api.ResourceStorage): resource.MustParse("20000G"),
},
},
},
},
},
"successful-match-with-class": {
expectedMatch: "gce-pd-silver1",
claim: &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claim01",
Namespace: "myns",
Annotations: map[string]string{
annClass: "silver",
},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"should-exist": "true",
},
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
},
},
},
},
},
"successful-match-with-class-and-labels": {
expectedMatch: "gce-pd-silver2",
claim: &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claim01",
Namespace: "myns",
Annotations: map[string]string{
annClass: "silver",
},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
},
},
},

@@ -561,6 +606,29 @@ func createTestVolumes() []*api.PersistentVolume {
"should-exist": "true",
},
},
Spec: api.PersistentVolumeSpec{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("20000G"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "gce-pd-silver1",
Name: "gce0023",
Labels: map[string]string{
"should-exist": "true",
},
Annotations: map[string]string{
annClass: "silver",
},
},
Spec: api.PersistentVolumeSpec{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"),

@@ -573,6 +641,46 @@ func createTestVolumes() []*api.PersistentVolume {
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "gce-pd-silver2",
Name: "gce0024",
Annotations: map[string]string{
annClass: "silver",
},
},
Spec: api.PersistentVolumeSpec{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("100G"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "gce-pd-gold",
Name: "gce0025",
Annotations: map[string]string{
annClass: "gold",
},
},
Spec: api.PersistentVolumeSpec{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("50G"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
},
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
},
},
}
}

@@ -632,7 +740,7 @@ func TestFindingPreboundVolumes(t *testing.T) {
// pretend the exact match is available but the largest volume is pre-bound to the claim.
pv1.Spec.ClaimRef = nil
pv8.Spec.ClaimRef = claimRef
volume, _ = index.findBestMatchForClaim(claim)
volume, _ = index.findBestMatchForClaim(claim, "")
if volume.Name != pv8.Name {
t.Errorf("Expected %s but got volume %s instead", pv8.Name, volume.Name)
}

@@ -21,8 +21,59 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
)

var class1Parameters = map[string]string{
"param1": "value1",
}
var class2Parameters = map[string]string{
"param2": "value2",
}
var storageClasses = []*extensions.StorageClass{
{
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},

ObjectMeta: api.ObjectMeta{
Name: "gold",
},

Provisioner: mockPluginName,
Parameters: class1Parameters,
},
{
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: api.ObjectMeta{
Name: "silver",
},
Provisioner: mockPluginName,
Parameters: class2Parameters,
},
}

// call to storageClass 1, returning an error
var provision1Error = provisionCall{
ret: errors.New("Moc provisioner error"),
expectedParameters: class1Parameters,
}

// call to storageClass 1, returning a valid PV
var provision1Success = provisionCall{
ret: nil,
expectedParameters: class1Parameters,
}

// call to storageClass 2, returning a valid PV
var provision2Success = provisionCall{
ret: nil,
expectedParameters: class2Parameters,
}
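
These fixtures presumably back the rewritten TestProvisionSync cases below; a minimal sketch of how a test table would be driven through the shared helpers (the wrapper function is invented for illustration, and passing "" means no controller-wide default class is configured):

// Illustrative only: run a []controllerTest table against the two classes above.
func runProvisionTable(t *testing.T, tests []controllerTest) {
	runSyncTests(t, tests, storageClasses)
	runMultisyncTests(t, tests, storageClasses, "")
}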
|
||||
|
||||
// Test single call to syncVolume, expecting provisioning to happen.
|
||||
// 1. Fill in the controller with initial data
|
||||
// 2. Call the syncVolume *once*.
|
||||
|
@ -30,14 +81,14 @@ import (
|
|||
func TestProvisionSync(t *testing.T) {
|
||||
tests := []controllerTest{
|
||||
{
|
||||
// Provision a volume
|
||||
"11-1 - successful provision",
|
||||
// Provision a volume (with the default class)
|
||||
"11-1 - successful provision with storage class 1",
|
||||
novolumes,
|
||||
newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
|
||||
newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass),
|
||||
newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
|
||||
// Binding will be completed in the next syncClaim
|
||||
newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
|
||||
noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
|
||||
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
|
||||
},
|
||||
{
|
||||
// Provision failure - plugin not found
|
||||
|
@ -57,7 +108,7 @@ func TestProvisionSync(t *testing.T) {
|
|||
newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
|
||||
newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
|
||||
[]string{"Warning ProvisioningFailed"}, noerrors,
|
||||
wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim),
|
||||
wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
|
||||
},
|
||||
{
|
||||
// Provision failure - Provision returns error
|
||||
|
@ -67,40 +118,35 @@ func TestProvisionSync(t *testing.T) {
|
|||
newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
|
||||
newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
|
||||
[]string{"Warning ProvisioningFailed"}, noerrors,
|
||||
wrapTestWithControllerConfig(operationProvision, []error{errors.New("Moc provisioner error")}, testSyncClaim),
|
||||
wrapTestWithProvisionCalls([]provisionCall{provision1Error}, testSyncClaim),
|
||||
},
|
||||
{
|
||||
// Provision success - there is already a volume available, still
|
||||
// we provision a new one when requested.
|
||||
// No provisioning if there is a matching volume available
|
||||
"11-6 - provisioning when there is a volume available",
|
||||
newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
|
||||
},
|
||||
newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
|
||||
// Binding will be completed in the next syncClaim
|
||||
newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, annClass),
|
||||
newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, annClass),
|
||||
newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
|
||||
newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", api.ClaimBound, annClass, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors,
|
||||
// No provisioning plugin confingure - makes the test fail when
|
||||
// the controller errorneously tries to provision something
|
||||
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
|
||||
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
|
||||
},
|
||||
{
|
||||
// Provision success? - claim is bound before provisioner creates
|
||||
// a volume.
|
||||
"11-7 - claim is bound before provisioning",
|
||||
novolumes,
|
||||
newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
|
||||
newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass),
|
||||
newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
|
||||
// The claim would be bound in next syncClaim
|
||||
newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
|
||||
noevents, noerrors,
|
||||
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
|
||||
wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
|
||||
// Create a volume before provisionClaimOperation starts.
|
||||
// This similates a parallel controller provisioning the volume.
|
||||
reactor.lock.Lock()
|
||||
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)
|
||||
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass)
|
||||
reactor.volumes[volume.Name] = volume
|
||||
reactor.lock.Unlock()
|
||||
}),
|
||||
|
@@ -110,7 +156,7 @@ func TestProvisionSync(t *testing.T) {
// second retry succeeds
"11-8 - cannot save provisioned volume",
novolumes,
newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass),
newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass),

@@ -121,7 +167,7 @@ func TestProvisionSync(t *testing.T) {
// will succeed.
{"create", "persistentvolumes", errors.New("Mock creation error")},
},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision success? - cannot save provisioned PV five times,

@@ -141,8 +187,12 @@ func TestProvisionSync(t *testing.T) {
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithControllerConfig(operationDelete, []error{nil},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)),
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{nil}, // delete calls
[]provisionCall{provision1Success}, // provision calls
testSyncClaim,
),
},
{
// Provision failure - cannot save provisioned PV five times,
@@ -163,7 +213,7 @@ func TestProvisionSync(t *testing.T) {
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
// No deleteCalls are configured, which results in no deleter plugin available for the volume
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,

@@ -183,16 +233,17 @@ func TestProvisionSync(t *testing.T) {
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithControllerConfig(
operationDelete, []error{
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{ // delete calls
errors.New("Mock deletion error1"),
errors.New("Mock deletion error2"),
errors.New("Mock deletion error3"),
errors.New("Mock deletion error4"),
errors.New("Mock deletion error5"),
},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
),
[]provisionCall{provision1Success}, // provision calls
testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,
@@ -212,16 +263,37 @@ func TestProvisionSync(t *testing.T) {
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithControllerConfig(
operationDelete, []error{
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{ // delete calls
errors.New("Mock deletion error1"),
nil,
},
wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
}, // provision calls
[]provisionCall{provision1Success},
testSyncClaim,
),
},
{
// Provision a volume (with non-default class)
"11-13 - successful provision with storage class 2",
novolumes,
volumeWithClass("silver", newVolumeArray("pvc-uid11-13", "1Gi", "uid11-13", "claim11-13", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)),
claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", api.ClaimPending)),
// Binding will be completed in the next syncClaim
claimWithClass("silver", newClaimArray("claim11-13", "uid11-13", "1Gi", "", api.ClaimPending)),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision2Success}, testSyncClaim),
},
{
// Provision error - non-existing class
"11-14 - fail due to non-existing class",
novolumes,
novolumes,
claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", api.ClaimPending)),
claimWithClass("non-existing", newClaimArray("claim11-14", "uid11-14", "1Gi", "", api.ClaimPending)),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
}
runSyncTests(t, tests)
runSyncTests(t, tests, storageClasses)
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all

@@ -244,20 +316,20 @@ func TestProvisionMultiSync(t *testing.T) {
// Provision a volume with binding
"12-1 - successful provision",
novolumes,
newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned, annClass),
newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass),
// Binding will be completed in the next syncClaim
newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted),
noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
}

runMultisyncTests(t, tests)
runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name)
}

// When provisioning is disabled, provisioning a claim should instantly return nil
func TestDisablingDynamicProvisioner(t *testing.T) {
ctrl := newTestController(nil, nil, nil, false)
ctrl := newTestController(nil, nil, nil, nil, false)
retVal := ctrl.provisionClaim(nil)
if retVal != nil {
t.Errorf("Expected nil return but got %v", retVal)

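The cases above swap the old operation-based wrappers (wrapTestWithControllerConfig) for wrappers that script an explicit list of provision calls. A rough, self-contained sketch of that idea, scripting results for successive fake Provision calls; every name here is hypothetical and not one of the actual framework_test helpers:

package main

import (
	"errors"
	"fmt"
)

// fakeProvisioner returns pre-scripted results for successive Provision calls,
// mimicking how a fixed []provisionCall is fed into the controller tests above.
type fakeProvisioner struct {
	results []error // one entry per expected Provision call
	calls   int
}

func (p *fakeProvisioner) Provision() error {
	if p.calls >= len(p.results) {
		return errors.New("unexpected extra Provision call")
	}
	err := p.results[p.calls]
	p.calls++
	return err
}

func main() {
	// Script one failure followed by one success, like the retry cases above.
	p := &fakeProvisioner{results: []error{errors.New("mock provision error"), nil}}
	for i := 0; i < 2; i++ {
		fmt.Printf("call %d: %v\n", i+1, p.Provision())
	}
}
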
@@ -21,6 +21,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
)

// Test single call to syncVolume, expecting recycling to happen.

@@ -39,7 +40,7 @@ func TestRecycleSync(t *testing.T) {
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle volume bound by user

@@ -51,7 +52,7 @@ func TestRecycleSync(t *testing.T) {
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle failure - plugin not found

@@ -70,7 +71,7 @@ func TestRecycleSync(t *testing.T) {
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume),
wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume),
},
{
// recycle failure - recycle returns error

@@ -80,7 +81,7 @@ func TestRecycleSync(t *testing.T) {
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume),
wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume),
},
{
// recycle success(?) - volume is deleted before doRecycle() starts

@@ -90,7 +91,7 @@ func TestRecycleSync(t *testing.T) {
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before recycle operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume6-6")
@@ -107,7 +108,7 @@ func TestRecycleSync(t *testing.T) {
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-7"]

@@ -128,7 +129,7 @@ func TestRecycleSync(t *testing.T) {
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-8"]

@@ -148,7 +149,7 @@ func TestRecycleSync(t *testing.T) {
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// volume has unknown reclaim policy - failure expected

@@ -160,7 +161,7 @@ func TestRecycleSync(t *testing.T) {
[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
},
}
runSyncTests(t, tests)
runSyncTests(t, tests, []*extensions.StorageClass{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all

@@ -188,9 +189,9 @@ func TestRecycleMultiSync(t *testing.T) {
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
},
}

runMultisyncTests(t, tests)
runMultisyncTests(t, tests, []*extensions.StorageClass{}, "")
}

@@ -491,6 +491,7 @@ var roleColumns = []string{"NAME", "AGE"}
var roleBindingColumns = []string{"NAME", "AGE"}
var clusterRoleColumns = []string{"NAME", "AGE"}
var clusterRoleBindingColumns = []string{"NAME", "AGE"}
var storageClassColumns = []string{"NAME", "TYPE"}

// TODO: consider having 'KIND' for third party resource data
var thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"}

@@ -603,6 +604,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
h.Handler(clusterRoleBindingColumns, printClusterRoleBindingList)
h.Handler(certificateSigningRequestColumns, printCertificateSigningRequest)
h.Handler(certificateSigningRequestColumns, printCertificateSigningRequestList)
h.Handler(storageClassColumns, printStorageClass)
h.Handler(storageClassColumns, printStorageClassList)
}

func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error {

@@ -2067,6 +2070,32 @@ func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, opt
return nil
}

func printStorageClass(sc *extensions.StorageClass, w io.Writer, options PrintOptions) error {
name := sc.Name
provtype := sc.Provisioner

if _, err := fmt.Fprintf(w, "%s\t%s\t", name, provtype); err != nil {
return err
}
if _, err := fmt.Fprint(w, AppendLabels(sc.Labels, options.ColumnLabels)); err != nil {
return err
}
if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, sc.Labels)); err != nil {
return err
}

return nil
}

func printStorageClassList(scList *extensions.StorageClassList, w io.Writer, options PrintOptions) error {
for _, sc := range scList.Items {
if err := printStorageClass(&sc, w, options); err != nil {
return err
}
}
return nil
}

func AppendLabels(itemLabels map[string]string, columnLabels []string) string {
var buffer bytes.Buffer

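printStorageClass emits one tab-separated NAME/TYPE row per object, and the HumanReadablePrinter is expected to flush those rows through a tab writer so the columns line up. A minimal, standard-library-only sketch of that output path, with made-up class names:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Mimic the NAME/TYPE columns used for StorageClass rows.
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tTYPE")
	fmt.Fprintln(w, "fast\tkubernetes.io/gce-pd")  // hypothetical class
	fmt.Fprintln(w, "gold\tkubernetes.io/aws-ebs") // hypothetical class
	w.Flush()
}
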
@@ -43,17 +43,6 @@ func ProbeVolumePlugins(volumeConfig volume.VolumeConfig) []volume.VolumePlugin
}
}

func ProbeRecyclableVolumePlugins(recyclerFunc func(pvName string, spec *volume.Spec, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error), volumeConfig volume.VolumeConfig) []volume.VolumePlugin {
return []volume.VolumePlugin{
&hostPathPlugin{
host: nil,
newRecyclerFunc: recyclerFunc,
newProvisionerFunc: newProvisioner,
config: volumeConfig,
},
}
}

type hostPathPlugin struct {
host volume.VolumeHost
// decouple creating Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing.

@@ -132,6 +121,9 @@ func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, err
}

func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if !plugin.config.ProvisioningEnabled {
return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName())
}
if len(options.AccessModes) == 0 {
options.AccessModes = plugin.GetAccessModes()
}

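The NewProvisioner hunk above gates host_path provisioning behind VolumeConfig.ProvisioningEnabled. A stripped-down sketch of the same constructor-guard pattern, using hypothetical local types rather than the real volume package:

package main

import "fmt"

type config struct {
	ProvisioningEnabled bool
}

type plugin struct {
	name   string
	config config
}

type provisioner struct{ pluginName string }

// NewProvisioner refuses to hand out a provisioner when the feature is
// switched off in the plugin's config, mirroring the host_path change above.
func (p *plugin) NewProvisioner() (*provisioner, error) {
	if !p.config.ProvisioningEnabled {
		return nil, fmt.Errorf("provisioning in volume plugin %q is disabled", p.name)
	}
	return &provisioner{pluginName: p.name}, nil
}

func main() {
	disabled := &plugin{name: "example/host-path", config: config{ProvisioningEnabled: false}}
	if _, err := disabled.NewProvisioner(); err != nil {
		fmt.Println("expected error:", err)
	}
}
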
@@ -154,7 +154,8 @@ func TestProvisioner(t *testing.T) {
err := os.MkdirAll(tempPath, 0750)

plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}),
volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))
spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
plug, err := plugMgr.FindCreatablePluginBySpec(spec)
if err != nil {

@@ -25,6 +25,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"

@@ -55,6 +56,10 @@ type VolumeOptions struct {
ClusterName string
// Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
CloudTags *map[string]string
// Volume provisioning parameters from StorageClass
Parameters map[string]string
// Volume selector from PersistentVolumeClaim
Selector *unversioned.LabelSelector
}

// VolumePlugin is an interface to volume plugins that can be used on a

@@ -293,6 +298,10 @@ type VolumeConfig struct {
// the system and only understood by the binary hosting the plugin and the
// plugin itself.
OtherAttributes map[string]string

// ProvisioningEnabled configures whether provisioning of this plugin is
// enabled or not. Currently used only in host_path plugin.
ProvisioningEnabled bool
}

// NewSpecFromVolume creates a Spec from an api.Volume
@@ -429,7 +438,20 @@ func (pm *VolumePluginMgr) FindRecyclablePluginBySpec(spec *Spec) (RecyclableVol
return nil, fmt.Errorf("no recyclable volume plugin matched")
}

// FindDeletablePluginByName fetches a persistent volume plugin by name. If
// FindProvisionablePluginByName fetches a persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindProvisionablePluginByName(name string) (ProvisionableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
return provisionableVolumePlugin, nil
}
return nil, fmt.Errorf("no provisionable volume plugin matched")
}

// FindDeletablePluginBySpec fetches a persistent volume plugin by spec. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)

@@ -442,6 +464,19 @@ func (pm *VolumePluginMgr) FindDeletablePluginBySpec(spec *Spec) (DeletableVolum
return nil, fmt.Errorf("no deletable volume plugin matched")
}

// FindDeletablePluginByName fetches a persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginByName(name)
if err != nil {
return nil, err
}
if deletableVolumePlugin, ok := volumePlugin.(DeletableVolumePlugin); ok {
return deletableVolumePlugin, nil
}
return nil, fmt.Errorf("no deletable volume plugin matched")
}

// FindCreatablePluginBySpec fetches a persistent volume plugin by spec. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) {

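FindProvisionablePluginByName and FindDeletablePluginByName follow the same shape: look the plugin up by name, then type-assert it against the capability interface the caller needs. A self-contained sketch of that lookup-then-assert pattern, with hypothetical interfaces in place of the real VolumePluginMgr API:

package main

import "fmt"

// Plugin is the base capability; Provisionable is an optional extra capability.
type Plugin interface{ Name() string }

type Provisionable interface {
	Plugin
	Provision() error
}

type registry struct{ plugins map[string]Plugin }

// FindProvisionableByName mirrors the lookup-then-assert pattern above.
func (r *registry) FindProvisionableByName(name string) (Provisionable, error) {
	p, ok := r.plugins[name]
	if !ok {
		return nil, fmt.Errorf("no plugin registered under %q", name)
	}
	if pp, ok := p.(Provisionable); ok {
		return pp, nil
	}
	return nil, fmt.Errorf("plugin %q cannot provision volumes", name)
}

type fakePlugin struct{ name string }

func (f *fakePlugin) Name() string     { return f.name }
func (f *fakePlugin) Provision() error { return nil }

func main() {
	r := &registry{plugins: map[string]Plugin{"example/fake": &fakePlugin{name: "example/fake"}}}
	if p, err := r.FindProvisionableByName("example/fake"); err == nil {
		fmt.Println("found provisionable plugin:", p.Name())
	}
}
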
@@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"

@@ -52,8 +53,15 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
framework.KubeDescribe("DynamicProvisioner", func() {
It("should create and delete persistent volumes", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

By("creating a StorageClass")
class := newStorageClass()
_, err := c.Extensions().StorageClasses().Create(class)
defer c.Extensions().StorageClasses().Delete(class.Name)
Expect(err).NotTo(HaveOccurred())

By("creating a claim with a dynamic provisioning annotation")
claim := createClaim(ns)
claim := newClaim(ns)
defer func() {
c.PersistentVolumeClaims(ns).Delete(claim.Name)
}()

@@ -124,13 +132,13 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
})
})

func createClaim(ns string) *api.PersistentVolumeClaim {
func newClaim(ns string) *api.PersistentVolumeClaim {
return &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "",
"volume.beta.kubernetes.io/storage-class": "fast",
},
},
Spec: api.PersistentVolumeClaimSpec{
@@ -192,3 +200,26 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) {
framework.ExpectNoError(err, "Failed to create pod: %v", err)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace))
}

func newStorageClass() *extensions.StorageClass {
var pluginName string

switch {
case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
pluginName = "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
pluginName = "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
pluginName = "kubernetes.io/cinder"
}

return &extensions.StorageClass{
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: api.ObjectMeta{
Name: "fast",
},
Provisioner: pluginName,
}
}

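Both the e2e claim above and the integration test below annotate claims with an empty alpha storage-class key plus a beta key naming the desired class ("fast" here, "gold" below). A minimal sketch of building that annotation map with plain Go maps instead of the Kubernetes API types:

package main

import "fmt"

// classAnnotations returns the annotations a claim would carry to request
// dynamic provisioning from the named StorageClass (e.g. "fast" or "gold").
func classAnnotations(class string) map[string]string {
	return map[string]string{
		"volume.alpha.kubernetes.io/storage-class": "",
		"volume.beta.kubernetes.io/storage-class":  class,
	}
}

func main() {
	for k, v := range classAnnotations("fast") {
		fmt.Printf("%s=%q\n", k, v)
	}
}
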
@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"

@@ -62,6 +63,8 @@ func init() {
const defaultObjectCount = 100
const defaultSyncPeriod = 10 * time.Second

const provisionerPluginName = "kubernetes.io/mock-provisioner"

func getObjectCount() int {
objectCount := defaultObjectCount
if s := os.Getenv("KUBE_INTEGRATION_PV_OBJECTS"); s != "" {

@@ -849,8 +852,20 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
defer watchPVC.Stop()

// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (PersistentVolumes).
// non-namespaced objects (PersistentVolumes and StorageClasses).
defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
defer testClient.Extensions().StorageClasses().DeleteCollection(nil, api.ListOptions{})

storageClass := extensions.StorageClass{
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: api.ObjectMeta{
Name: "gold",
},
Provisioner: provisionerPluginName,
}
testClient.Extensions().StorageClasses().Create(&storageClass)

binder.Run()
defer binder.Stop()

@@ -860,7 +875,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
for i := 0; i < objCount; i++ {
pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
pvc.Annotations = map[string]string{
"volume.alpha.kubernetes.io/storage-class": "",
"volume.beta.kubernetes.io/storage-class": "gold",
}
pvcs[i] = pvc
}

@@ -1086,7 +1101,7 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri

host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
plugin := &volumetest.FakeVolumePlugin{
PluginName: "plugin-name",
PluginName: provisionerPluginName,
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},

@@ -1101,7 +1116,7 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeri
cloud := &fake_cloud.FakeCloud{}

syncPeriod = getSyncPeriod(syncPeriod)
ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, plugin, plugins, cloud, "", nil, nil, nil, true)
ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, plugins, cloud, "", nil, nil, nil, nil, true)

watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
if err != nil {