mirror of https://github.com/k3s-io/k3s
Merge pull request #63193 from lichuqiang/provision_0425
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Volume topology aware dynamic provisioning: work based on new API

**What this PR does / why we need it**:
The PR has been split into 3 parts:
- Part 1: https://github.com/kubernetes/kubernetes/pull/63232 for basic scheduler and PV controller plumbing
- Part 2: https://github.com/kubernetes/kubernetes/pull/63233 for the API change

This PR itself contains the work based on the API change:
- Dynamic provisioning allowed-topologies scheduler work
- Update the provisioning interface to be aware of the selected node and topology

**Which issue(s) this PR fixes**:
Feature: https://github.com/kubernetes/features/issues/561
Design: https://github.com/kubernetes/community/issues/2168

**Special notes for your reviewer**:
/sig storage
/sig scheduling
/assign @msau42 @jsafrane @saad-ali @bsalamat
@kubernetes/sig-storage-pr-reviews @kubernetes/sig-scheduling-pr-reviews

**Release note**:
```release-note
Volume topology aware dynamic provisioning
```
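For context, the feature relies on a StorageClass that delays binding and restricts provisioning topology. Below is a minimal sketch of such an object using the Go API types that also appear in the test code further down; the class name, provisioner string, label key and zone values are illustrative and are not taken from this PR.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Delay binding until a pod is scheduled, then only provision in the
	// listed zones. All names and values here are examples.
	waitForFirstConsumer := storagev1.VolumeBindingWaitForFirstConsumer
	class := &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "topology-aware"},
		Provisioner:       "example.com/provisioner", // hypothetical provisioner
		VolumeBindingMode: &waitForFirstConsumer,
		AllowedTopologies: []v1.TopologySelectorTerm{
			{
				MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
					{
						Key:    "failure-domain.beta.kubernetes.io/zone",
						Values: []string{"zone-a", "zone-b"},
					},
				},
			},
		},
	}
	fmt.Printf("%s allows %v\n", class.Name, class.AllowedTopologies)
}
```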
commit c615098a96
@@ -175,6 +175,7 @@ func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error)
     ClaimInformer: ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
     ClassInformer: ctx.InformerFactory.Storage().V1().StorageClasses(),
     PodInformer:   ctx.InformerFactory.Core().V1().Pods(),
+    NodeInformer:  ctx.InformerFactory.Core().V1().Nodes(),
     EnableDynamicProvisioning: ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration.EnableDynamicProvisioning,
 }
 volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params)
@@ -612,6 +612,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
     ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
     ClassInformer: informerFactory.Storage().V1().StorageClasses(),
     PodInformer:   informerFactory.Core().V1().Pods(),
+    NodeInformer:  informerFactory.Core().V1().Nodes(),
     EventRecorder: record.NewFakeRecorder(1000),
     EnableDynamicProvisioning: enableDynamicProvisioning,
 }
@@ -1192,7 +1193,7 @@ func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.P
     }
 }

-func (plugin *mockVolumePlugin) Provision() (*v1.PersistentVolume, error) {
+func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if len(plugin.provisionCalls) <= plugin.provisionCallCounter {
         return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter)
     }
@@ -174,6 +174,8 @@ type PersistentVolumeController struct {
     classListerSynced cache.InformerSynced
     podLister         corelisters.PodLister
     podListerSynced   cache.InformerSynced
+    NodeLister        corelisters.NodeLister
+    NodeListerSynced  cache.InformerSynced

     kubeClient    clientset.Interface
     eventRecorder record.EventRecorder
@@ -1434,13 +1436,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
         return
     }

+    var selectedNode *v1.Node = nil
+    var allowedTopologies []v1.TopologySelectorTerm = nil
+    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
+        if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
+            selectedNode, err = ctrl.NodeLister.Get(nodeName)
+            if err != nil {
+                strerr := fmt.Sprintf("Failed to get target node: %v", err)
+                glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
+                ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
+                return
+            }
+        }
+        allowedTopologies = storageClass.AllowedTopologies
+    }
+
     opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
-    // TODO: modify the Provision() interface to pass in the allowed topology information
-    // of the provisioned volume.
-    volume, err = provisioner.Provision()
+    volume, err = provisioner.Provision(selectedNode, allowedTopologies)
     opComplete(&err)
     if err != nil {
-        // Other places of failure has nothing to do with DynamicProvisioningScheduling,
+        // Other places of failure have nothing to do with DynamicProvisioningScheduling,
         // so just let controller retry in the next sync. We'll only call func
         // rescheduleProvisioning here when the underlying provisioning actually failed.
         ctrl.rescheduleProvisioning(claim)
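The hunk above calls ctrl.rescheduleProvisioning(claim) without showing its body. A rough sketch of the intent, written in the controller's context and under the assumption that the helper simply clears the scheduler's selected-node annotation (e.g. "volume.kubernetes.io/selected-node") so that volume scheduling runs again and may pick a different node; this is not code from the PR.

```go
// Sketch only: assumes annSelectedNode is the annotation the scheduler writes
// on the claim, and that ctrl.kubeClient is the controller's clientset.
func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.PersistentVolumeClaim) {
	if _, ok := claim.Annotations[annSelectedNode]; !ok {
		// Provisioning was not triggered by the scheduler; nothing to undo.
		return
	}

	// Drop the selected-node annotation so the scheduler re-evaluates the pod
	// and may choose another node for the next provisioning attempt.
	newClaim := claim.DeepCopy()
	delete(newClaim.Annotations, annSelectedNode)
	if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil {
		glog.V(4).Infof("Failed to remove annotation %q from claim %q: %v", annSelectedNode, claimToClaimKey(newClaim), err)
	}
}
```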
@@ -63,6 +63,7 @@ type ControllerParameters struct {
     ClaimInformer coreinformers.PersistentVolumeClaimInformer
     ClassInformer storageinformers.StorageClassInformer
     PodInformer   coreinformers.PodInformer
+    NodeInformer  coreinformers.NodeInformer
     EventRecorder record.EventRecorder
     EnableDynamicProvisioning bool
 }
@@ -122,6 +123,8 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
     controller.classListerSynced = p.ClassInformer.Informer().HasSynced
     controller.podLister = p.PodInformer.Lister()
     controller.podListerSynced = p.PodInformer.Informer().HasSynced
+    controller.NodeLister = p.NodeInformer.Lister()
+    controller.NodeListerSynced = p.NodeInformer.Informer().HasSynced
     return controller, nil
 }

@@ -268,7 +271,7 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
     glog.Infof("Starting persistent volume controller")
     defer glog.Infof("Shutting down persistent volume controller")

-    if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced) {
+    if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
         return
     }

@@ -24,6 +24,7 @@ import (

     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     coreinformers "k8s.io/client-go/informers/core/v1"
     storageinformers "k8s.io/client-go/informers/storage/v1"
@@ -481,7 +482,11 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
         return false, nil
     }

-    // TODO: Check if the node can satisfy the topology requirement in the class
+    // Check if the node can satisfy the topology requirement in the class
+    if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
+        glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim))
+        return false, nil
+    }

     // TODO: Check if capacity of the node domain in the storage class
     // can satisfy resource requirement of given claim
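For a concrete feel of the new check, here is a standalone example that calls the same helper on a node and an allowed-topology list; the label key and zone values are made up, and v1helper refers to the in-tree package the binder already imports for this call.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

func main() {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node1",
			Labels: map[string]string{"example.com/zone": "zone-a"},
		},
	}
	allowed := []v1.TopologySelectorTerm{
		{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
				{Key: "example.com/zone", Values: []string{"zone-a", "zone-b"}},
			},
		},
	}
	// The node's zone is in the allowed set, so the claim can be provisioned
	// for this node; a node labeled "zone-c" would be rejected.
	fmt.Println(v1helper.MatchTopologySelectorTerms(allowed, labels.Set(node.Labels)))
}
```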
@@ -50,6 +50,7 @@ var (
     provisionedPVC2             = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
     provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
     noProvisionerPVC            = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
+    topoMismatchPVC             = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)

     pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
     pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
@@ -74,6 +75,7 @@ var (
     waitClass                = "waitClass"
     immediateClass           = "immediateClass"
     provisionNotSupportClass = "provisionNotSupportedClass"
+    topoMismatchClass        = "topoMismatchClass"

     nodeLabelKey   = "nodeKey"
     nodeLabelValue = "node1"
@@ -112,6 +114,16 @@ func newTestBinder(t *testing.T) *testEnv {
         },
         VolumeBindingMode: &waitMode,
         Provisioner:       "test-provisioner",
+        AllowedTopologies: []v1.TopologySelectorTerm{
+            {
+                MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
+                    {
+                        Key:    nodeLabelKey,
+                        Values: []string{nodeLabelValue, "reference-value"},
+                    },
+                },
+            },
+        },
     },
     {
         ObjectMeta: metav1.ObjectMeta{
@@ -126,6 +138,23 @@ func newTestBinder(t *testing.T) *testEnv {
         VolumeBindingMode: &waitMode,
         Provisioner:       "kubernetes.io/no-provisioner",
     },
+    {
+        ObjectMeta: metav1.ObjectMeta{
+            Name: topoMismatchClass,
+        },
+        VolumeBindingMode: &waitMode,
+        Provisioner:       "test-provisioner",
+        AllowedTopologies: []v1.TopologySelectorTerm{
+            {
+                MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
+                    {
+                        Key:    nodeLabelKey,
+                        Values: []string{"reference-value"},
+                    },
+                },
+            },
+        },
+    },
 }
 for _, class := range classes {
     if err := classInformer.Informer().GetIndexer().Add(class); err != nil {
@@ -740,6 +769,11 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
         expectedUnbound: false,
         expectedBound:   true,
     },
+    "volume-topology-unsatisfied": {
+        podPVCs:         []*v1.PersistentVolumeClaim{topoMismatchPVC},
+        expectedUnbound: false,
+        expectedBound:   true,
+    },
 }

 // Set VolumeScheduling and DynamicProvisioningScheduling feature gate
@@ -499,7 +499,7 @@ type awsElasticBlockStoreProvisioner struct {

 var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{}

-func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -173,7 +173,7 @@ func TestPlugin(t *testing.T) {
     if err != nil {
         t.Errorf("Error creating new provisioner:%v", err)
     }
-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Errorf("Provision() failed: %v", err)
     }
@@ -67,7 +67,7 @@ func (d *azureDiskDeleter) Delete() error {
     return diskController.DeleteBlobDisk(volumeSource.DataDiskURI)
 }

-func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
     }
@@ -131,7 +131,7 @@ type azureFileProvisioner struct {

 var _ volume.Provisioner = &azureFileProvisioner{}

-func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
     }
@@ -500,7 +500,7 @@ type cinderVolumeProvisioner struct {

 var _ volume.Provisioner = &cinderVolumeProvisioner{}

-func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -196,7 +196,7 @@ func TestPlugin(t *testing.T) {
     PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
 }
 provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
-persistentSpec, err := provisioner.Provision()
+persistentSpec, err := provisioner.Provision(nil, nil)
 if err != nil {
     t.Errorf("Provision() failed: %v", err)
 }
@@ -54,7 +54,7 @@ type flockerVolumeProvisioner struct {

 var _ volume.Provisioner = &flockerVolumeProvisioner{}

-func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *flockerVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -57,7 +57,7 @@ func TestProvision(t *testing.T) {
     dir, provisioner := newTestableProvisioner(assert, options)
     defer os.RemoveAll(dir)

-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     assert.NoError(err, "Provision() failed: ", err)

     cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
@@ -85,7 +85,7 @@ func TestProvision(t *testing.T) {

     dir, provisioner = newTestableProvisioner(assert, options)
     defer os.RemoveAll(dir)
-    persistentSpec, err = provisioner.Provision()
+    persistentSpec, err = provisioner.Provision(nil, nil)
     assert.Error(err, "Provision() did not fail with Parameters specified")

     // selectors are not supported
@@ -97,6 +97,6 @@ func TestProvision(t *testing.T) {

     dir, provisioner = newTestableProvisioner(assert, options)
     defer os.RemoveAll(dir)
-    persistentSpec, err = provisioner.Provision()
+    persistentSpec, err = provisioner.Provision(nil, nil)
     assert.Error(err, "Provision() did not fail with Selector specified")
 }
@@ -425,7 +425,7 @@ type gcePersistentDiskProvisioner struct {

 var _ volume.Provisioner = &gcePersistentDiskProvisioner{}

-func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *gcePersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -167,7 +167,7 @@ func TestPlugin(t *testing.T) {
     if err != nil {
         t.Errorf("Error creating new provisioner:%v", err)
     }
-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Errorf("Provision() failed: %v", err)
     }
@@ -664,7 +664,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
     return nil
 }

-func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
     }
@@ -265,7 +265,7 @@ type hostPathProvisioner struct {

 // Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
 // This Provisioner is meant for development and testing only and WILL NOT WORK in a multi-node cluster.
-func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (r *hostPathProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if util.CheckPersistentVolumeClaimModeBlock(r.options.PVC) {
         return nil, fmt.Errorf("%s does not support block volume provisioning", r.plugin.GetPluginName())
     }
@@ -178,7 +178,7 @@ func TestProvisioner(t *testing.T) {
     if err != nil {
         t.Errorf("Failed to make a new Provisioner: %v", err)
     }
-    pv, err := creater.Provision()
+    pv, err := creater.Provision(nil, nil)
     if err != nil {
         t.Errorf("Unexpected error creating volume: %v", err)
     }
@@ -340,7 +340,7 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume.
     }, nil
 }

-func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (p *photonPersistentDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
     }
@@ -166,7 +166,7 @@ func TestPlugin(t *testing.T) {
     if err != nil {
         t.Fatalf("Error creating new provisioner:%v", err)
     }
-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Errorf("Provision() failed: %v", err)
     }
@@ -378,7 +378,7 @@ type portworxVolumeProvisioner struct {

 var _ volume.Provisioner = &portworxVolumeProvisioner{}

-func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *portworxVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -204,7 +204,7 @@ func TestPlugin(t *testing.T) {
     if err != nil {
         t.Errorf("Error creating a new provisioner:%v", err)
     }
-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Errorf("Provision() failed: %v", err)
     }
@@ -354,7 +354,7 @@ type quobyteVolumeProvisioner struct {
     options volume.VolumeOptions
 }

-func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (provisioner *quobyteVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", provisioner.options.PVC.Spec.AccessModes, provisioner.plugin.GetAccessModes())
     }
@@ -579,7 +579,7 @@ type rbdVolumeProvisioner struct {

 var _ volume.Provisioner = &rbdVolumeProvisioner{}

-func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (r *rbdVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !volutil.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, r.plugin.GetAccessModes())
     }
@@ -252,7 +252,7 @@ func (v *sioVolume) Delete() error {
 // ************************
 var _ volume.Provisioner = &sioVolume{}

-func (v *sioVolume) Provision() (*api.PersistentVolume, error) {
+func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.TopologySelectorTerm) (*api.PersistentVolume, error) {
     glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name))

     if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
@@ -296,7 +296,7 @@ func TestVolumeProvisioner(t *testing.T) {
     }
     sioVol.sioMgr.client = sio

-    spec, err := provisioner.Provision()
+    spec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Fatalf("call to Provision() failed: %v", err)
     }
@@ -467,7 +467,7 @@ func TestVolumeProvisionerWithZeroCapacity(t *testing.T) {
     }
     sioVol.sioMgr.client = sio

-    _, err = provisioner.Provision()
+    _, err = provisioner.Provision(nil, nil)
     if err == nil {
         t.Fatalf("call to Provision() should fail with invalid capacity")
     }
@@ -516,7 +516,7 @@ func TestVolumeProvisionerWithSecretNamespace(t *testing.T) {
     }
     sioVol.sioMgr.client = sio

-    spec, err := sioVol.Provision()
+    spec, err := sioVol.Provision(nil, nil)
     if err != nil {
         t.Fatalf("call to Provision() failed: %v", err)
     }
@@ -560,7 +560,7 @@ type storageosProvisioner struct {

 var _ volume.Provisioner = &storageosProvisioner{}

-func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
     }
@@ -269,7 +269,7 @@ func TestPlugin(t *testing.T) {
     t.Errorf("newProvisionerInternal() failed: %v", err)
 }

-persistentSpec, err := provisioner.Provision()
+persistentSpec, err := provisioner.Provision(nil, nil)
 if err != nil {
     t.Fatalf("Provision() failed: %v", err)
 }
@@ -756,7 +756,7 @@ type FakeProvisioner struct {
     Host VolumeHost
 }

-func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (fc *FakeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())

     pv := &v1.PersistentVolume{
@@ -183,7 +183,7 @@ type Provisioner interface {
     // Provision creates the resource by allocating the underlying volume in a
     // storage system. This method should block until completion and returns
     // PersistentVolume representing the created storage resource.
-    Provision() (*v1.PersistentVolume, error)
+    Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error)
 }

 // Deleter removes the resource from the underlying storage provider. Calls
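With the widened interface, each plugin decides how much of the topology information to honor. A hypothetical helper a zone-aware plugin could use to turn selectedNode and allowedTopologies into a placement hint is sketched below; pickZone and the label key are inventions for illustration and do not appear in this PR.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// zoneLabel is an assumed label key; a real plugin would use its provider's zone label.
const zoneLabel = "example.com/zone"

// pickZone prefers the zone of the node the scheduler selected, then the first
// zone permitted by the StorageClass, and otherwise returns "" so the plugin
// can fall back to its own default placement.
func pickZone(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) string {
	if selectedNode != nil {
		if zone, ok := selectedNode.Labels[zoneLabel]; ok {
			return zone
		}
	}
	for _, term := range allowedTopologies {
		for _, req := range term.MatchLabelExpressions {
			if req.Key == zoneLabel && len(req.Values) > 0 {
				return req.Values[0]
			}
		}
	}
	return ""
}

func main() {
	node := &v1.Node{}
	node.Labels = map[string]string{zoneLabel: "zone-a"}
	fmt.Println(pickZone(node, nil)) // "zone-a"
	fmt.Println(pickZone(nil, nil))  // ""
}
```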
@@ -348,7 +348,7 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO
     }, nil
 }

-func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
+func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
     if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
         return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
     }
@@ -156,7 +156,7 @@ func TestPlugin(t *testing.T) {
     if err != nil {
         t.Errorf("newProvisionerInternal() failed: %v", err)
     }
-    persistentSpec, err := provisioner.Provision()
+    persistentSpec, err := provisioner.Provision(nil, nil)
     if err != nil {
         t.Errorf("Provision() failed: %v", err)
     }
@@ -384,6 +384,7 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int) *testConfig {
     ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
     ClassInformer: informers.Storage().V1().StorageClasses(),
     PodInformer:   informers.Core().V1().Pods(),
+    NodeInformer:  informers.Core().V1().Nodes(),
     EnableDynamicProvisioning: true,
 }
 ctrl, err := persistentvolume.NewController(params)
@@ -1136,6 +1136,7 @@ func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPerio
     ClaimInformer: informers.Core().V1().PersistentVolumeClaims(),
     ClassInformer: informers.Storage().V1().StorageClasses(),
     PodInformer:   informers.Core().V1().Pods(),
+    NodeInformer:  informers.Core().V1().Nodes(),
     EnableDynamicProvisioning: true,
 })
 if err != nil {