mirror of https://github.com/k3s-io/k3s
Merge pull request #34611 from jsafrane/provision-pvc
Automatic merge from submit-queue

Pass whole PVC to provisioner plugins

The Gluster provisioner is interested in the namespace of the PVCs being provisioned, and I don't want to add it as yet another field in `volume.VolumeOptions` - that struct would end up containing almost the whole PVC. Instead, let's rework `VolumeOptions` to carry a direct reference to the PVC rather than a handful of "interesting" fields, and let each provisioner pick out the information it needs. This required refactoring in a lot of volume plugins, but the logic is simple and identical in all of them.

@rootfs @humblec
commit 4e393fadf3
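In short: instead of copying selected claim fields (capacity, access modes, claim name, selector) into `volume.VolumeOptions`, the controller now passes the whole claim and each plugin reads what it needs. A minimal sketch of the provisioner-side pattern, with the hypothetical `myProvisioner` standing in for the real plugins touched below:

```go
// Hypothetical sketch of the pattern this PR applies in every plugin;
// myProvisioner is a stand-in type, not part of this diff.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/volume"
)

type myProvisioner struct {
	options volume.VolumeOptions
	plugin  volume.PersistentVolumePlugin // supplies GetAccessModes()
}

func (p *myProvisioner) pvSpecFromClaim() api.PersistentVolumeSpec {
	// The requested size is read from the claim instead of a pre-copied
	// options.Capacity field.
	capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]

	// Access modes come from the claim; defaulting to the plugin's
	// supported modes moved from NewProvisioner() into each provisioner.
	accessModes := p.options.PVC.Spec.AccessModes
	if len(accessModes) == 0 {
		accessModes = p.plugin.GetAccessModes()
	}

	return api.PersistentVolumeSpec{
		PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
		AccessModes:                   accessModes,
		Capacity: api.ResourceList{
			api.ResourceName(api.ResourceStorage): capacity,
		},
	}
}
```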
@@ -1148,15 +1148,17 @@ func (plugin *mockVolumePlugin) Provision() (*api.PersistentVolume, error) {
 	}
 	if call.ret == nil {
 		// Create a fake PV with known GCE volume (to match expected volume)
+		capacity := plugin.provisionOptions.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+		accessModes := plugin.provisionOptions.PVC.Spec.AccessModes
 		pv = &api.PersistentVolume{
 			ObjectMeta: api.ObjectMeta{
 				Name: plugin.provisionOptions.PVName,
 			},
 			Spec: api.PersistentVolumeSpec{
 				Capacity: api.ResourceList{
-					api.ResourceName(api.ResourceStorage): plugin.provisionOptions.Capacity,
+					api.ResourceName(api.ResourceStorage): capacity,
 				},
-				AccessModes:                   plugin.provisionOptions.AccessModes,
+				AccessModes:                   accessModes,
 				PersistentVolumeReclaimPolicy: plugin.provisionOptions.PersistentVolumeReclaimPolicy,
 				PersistentVolumeSource: api.PersistentVolumeSource{
 					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
@@ -1260,15 +1260,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interface{}) {
 	tags[cloudVolumeCreatedForVolumeNameTag] = pvName
 
 	options := vol.VolumeOptions{
-		Capacity:                      claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
-		AccessModes:                   claim.Spec.AccessModes,
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 		CloudTags:                     &tags,
 		ClusterName:                   ctrl.clusterName,
 		PVName:                        pvName,
-		PVCName:                       claim.Name,
+		PVC:                           claim,
 		Parameters:                    storageClass.Parameters,
-		Selector:                      claim.Spec.Selector,
 	}
 
 	// Provision the volume
@@ -160,9 +160,6 @@ func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec,
 }
 
 func (plugin *awsElasticBlockStorePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options, &AWSDiskUtil{})
 }
 
@@ -429,7 +426,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   c.options.AccessModes,
+			AccessModes:                   c.options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
 				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 			},
@@ -444,6 +441,10 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 	}
 
+	if len(c.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = c.plugin.GetAccessModes()
+	}
+
 	if len(labels) != 0 {
 		if pv.Labels == nil {
 			pv.Labels = make(map[string]string)
@@ -23,7 +23,6 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -180,12 +179,8 @@ func TestPlugin(t *testing.T) {
 	}
 
 	// Test Provisioner
-	cap := resource.MustParse("100Mi")
 	options := volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 	}
 	provisioner, err := plug.(*awsElasticBlockStorePlugin).newProvisionerInternal(options, &fakePDManager{})
@@ -197,7 +192,7 @@ func TestPlugin(t *testing.T) {
 	if persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID != "test-aws-volume-name" {
 		t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID)
 	}
-	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
+	cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
 	size := cap.Value()
 	if size != 100*1024*1024*1024 {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 	"k8s.io/kubernetes/pkg/volume"
@@ -79,13 +80,14 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (string, int, *map[string]string, error) {
 	}
 	tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
 
-	requestBytes := c.options.Capacity.Value()
+	capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	requestBytes := capacity.Value()
 	// AWS works with gigabytes, convert to GiB with rounding up
 	requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
 	volumeOptions := &aws.VolumeOptions{
 		CapacityGB: requestGB,
 		Tags:       tags,
-		PVCName:    c.options.PVCName,
+		PVCName:    c.options.PVC.Name,
 	}
 	// Apply Parameters (case-insensitive). We leave validation of
 	// the values to the cloud provider.
@@ -112,8 +114,8 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (string, int, *map[string]string, error) {
 		}
 	}
 
-	// TODO: implement c.options.ProvisionerSelector parsing
-	if c.options.Selector != nil {
+	// TODO: implement PVC.Selector parsing
+	if c.options.PVC.Spec.Selector != nil {
 		return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on AWS")
 	}
 
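Every `CreateVolume` above and below funnels the claim's byte request through `volume.RoundUpSize` before talking to the storage backend. For reference, a sketch of that helper, assuming it is the plain integer ceil division from pkg/volume/util.go:

```go
// Assumed shape of the existing helper: round volumeSizeBytes up to the
// next whole multiple of allocationUnitBytes.
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}
```

So a 100Mi claim (104857600 bytes) becomes a 1 GiB disk on AWS, Cinder and GCE, and 102400 KiB on vSphere.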
@@ -161,9 +161,6 @@ func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
 }
 
 func (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options, &CinderDiskUtil{})
 }
 
@@ -474,7 +471,7 @@ func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   c.options.AccessModes,
+			AccessModes:                   c.options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
 				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 			},
@@ -487,6 +484,10 @@ func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 			},
 		},
 	}
+	if len(c.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = c.plugin.GetAccessModes()
+	}
+
 	return pv, nil
 }
 
@@ -24,7 +24,6 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -199,12 +198,8 @@ func TestPlugin(t *testing.T) {
 	}
 
 	// Test Provisioner
-	cap := resource.MustParse("100Mi")
 	options := volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 	}
 	provisioner, err := plug.(*cinderPlugin).newProvisionerInternal(options, &fakePDManager{0})
@@ -216,7 +211,7 @@ func TestPlugin(t *testing.T) {
 	if persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID != "test-volume-name" {
 		t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.Cinder.VolumeID)
 	}
-	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
+	cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
 	size := cap.Value()
 	if size != 1024*1024*1024 {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/volume"
 )
@@ -139,7 +140,8 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
 		return "", 0, err
 	}
 
-	volSizeBytes := c.options.Capacity.Value()
+	capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	volSizeBytes := capacity.Value()
 	// Cinder works with gigabytes, convert to GiB with rounding up
 	volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
 	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
@@ -157,8 +159,8 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
 			return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
 		}
 	}
-	// TODO: implement c.options.ProvisionerSelector parsing
-	if c.options.Selector != nil {
+	// TODO: implement PVC.Selector parsing
+	if c.options.PVC.Spec.Selector != nil {
 		return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Cinder")
 	}
 
@@ -452,9 +452,6 @@ func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) {
 }
 
 func (plugin *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options, &FlockerUtil{})
 }
 
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"time"
 
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/util/rand"
 	"k8s.io/kubernetes/pkg/volume"
 
@@ -70,14 +71,15 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, err error) {
 	node := nodes[rand.Intn(len(nodes))]
 	glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID)
 
-	requestBytes := c.options.Capacity.Value()
+	capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	requestBytes := capacity.Value()
 	volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
 
 	createOptions := &flockerApi.CreateDatasetOptions{
 		MaximumSize: requestBytes,
 		Metadata: map[string]string{
 			"type": "k8s-dynamic-prov",
-			"pvc":  c.options.PVCName,
+			"pvc":  c.options.PVC.Name,
 		},
 		Primary: node.UUID,
 	}
@@ -21,8 +21,8 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/volume"
+	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -31,11 +31,9 @@ func TestFlockerUtil_CreateVolume(t *testing.T) {
 	assert := assert.New(t)
 
 	// test CreateVolume happy path
+	pvc := volumetest.CreateTestPVC("3Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
 	options := volume.VolumeOptions{
-		Capacity: resource.MustParse("3Gi"),
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: pvc,
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 	}
 
@@ -58,7 +58,7 @@ func (c *flockerVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 		return nil, fmt.Errorf("Provisioning failed: Specified at least one unsupported parameter")
 	}
 
-	if c.options.Selector != nil {
+	if c.options.PVC.Spec.Selector != nil {
 		return nil, fmt.Errorf("Provisioning failed: Specified unsupported selector")
 	}
 
@@ -77,7 +77,7 @@ func (c *flockerVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   c.options.AccessModes,
+			AccessModes:                   c.options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
 				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 			},
@@ -88,6 +88,9 @@ func (c *flockerVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 			},
 		},
 	}
+	if len(c.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = c.plugin.GetAccessModes()
+	}
 
 	if len(labels) != 0 {
 		if pv.Labels == nil {
@@ -21,7 +21,6 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
 	"k8s.io/kubernetes/pkg/volume"
@@ -48,12 +47,9 @@ func newTestableProvisioner(assert *assert.Assertions, options volume.VolumeOptions)
 func TestProvision(t *testing.T) {
 	assert := assert.New(t)
 
-	cap := resource.MustParse("3Gi")
+	pvc := volumetest.CreateTestPVC("3Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
 	options := volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: pvc,
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 	}
 
@@ -62,7 +58,7 @@ func TestProvision(t *testing.T) {
 	persistentSpec, err := provisioner.Provision()
 	assert.NoError(err, "Provision() failed: ", err)
 
-	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
+	cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
 
 	assert.Equal(int64(3*1024*1024*1024), cap.Value())
 
@@ -78,10 +74,7 @@ func TestProvision(t *testing.T) {
 
 	// parameters are not supported
 	options = volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: pvc,
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 		Parameters: map[string]string{
 			"not-supported-params": "test123",
@@ -93,13 +86,10 @@ func TestProvision(t *testing.T) {
 	assert.Error(err, "Provision() did not fail with Parameters specified")
 
 	// selectors are not supported
+	pvc.Spec.Selector = &unversioned.LabelSelector{MatchLabels: map[string]string{"key": "value"}}
 	options = volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: pvc,
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
-		Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"key": "value"}},
 	}
 
 	provisioner = newTestableProvisioner(assert, options)
@@ -166,9 +166,6 @@ func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, manager pdManager) (volume.Deleter, error) {
 }
 
 func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options, &GCEDiskUtil{})
 }
 
@@ -393,7 +390,7 @@ func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   c.options.AccessModes,
+			AccessModes:                   c.options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
 				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 			},
@@ -406,6 +403,9 @@ func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) {
 			},
 		},
 	}
+	if len(c.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = c.plugin.GetAccessModes()
+	}
 
 	if len(labels) != 0 {
 		if pv.Labels == nil {
@@ -23,7 +23,6 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
@@ -174,12 +173,8 @@ func TestPlugin(t *testing.T) {
 	}
 
 	// Test Provisioner
-	cap := resource.MustParse("100Mi")
 	options := volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 	}
 	provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
@@ -191,7 +186,7 @@ func TestPlugin(t *testing.T) {
 	if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" {
 		t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)
 	}
-	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
+	cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
 	size := cap.Value()
 	if size != 100*1024*1024*1024 {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/util/exec"
@@ -77,7 +78,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, *map[string]string, error) {
 	}
 
 	name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
-	requestBytes := c.options.Capacity.Value()
+	capacity := c.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	requestBytes := capacity.Value()
 	// GCE works with gigabytes, convert to GiB with rounding up
 	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
 
@@ -96,8 +98,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, *map[string]string, error) {
 		}
 	}
 
-	// TODO: implement c.options.ProvisionerSelector parsing
-	if c.options.Selector != nil {
+	// TODO: implement PVC.Selector parsing
+	if c.options.PVC.Spec.Selector != nil {
 		return "", 0, nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
 	}
 
@@ -109,7 +111,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, *map[string]string, error) {
 			glog.V(2).Infof("error getting zone information from GCE: %v", err)
 			return "", 0, nil, err
 		}
-		zone = volume.ChooseZoneForVolume(zones, c.options.PVCName)
+		zone = volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
 	}
 
 	err = cloud.CreateDisk(name, diskType, zone, int64(requestGB), *c.options.CloudTags)
@@ -336,9 +336,6 @@ func getVolumeSource(
 }
 
 func (plugin *glusterfsPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options)
 }
 
@@ -441,7 +438,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
 
 func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	var err error
-	if r.options.Selector != nil {
+	if r.options.PVC.Spec.Selector != nil {
 		glog.V(4).Infof("glusterfs: not able to parse your claim Selector")
 		return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
 	}
@@ -507,7 +504,10 @@ func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	pv := new(api.PersistentVolume)
 	pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
 	pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
-	pv.Spec.AccessModes = r.options.AccessModes
+	pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
+	if len(pv.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = r.plugin.GetAccessModes()
+	}
 	pv.Spec.Capacity = api.ResourceList{
 		api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 	}
@@ -516,7 +516,8 @@ func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 }
 
 func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSource, size int, err error) {
-	volSizeBytes := p.options.Capacity.Value()
+	capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	volSizeBytes := capacity.Value()
 	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
 	glog.V(2).Infof("glusterfs: create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisioningConfig)
 	if p.url == "" {
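This is the hunk the PR description is really about: with the whole claim in `VolumeOptions`, the Gluster provisioner can later read the claim's namespace (or any other PVC field) without adding another `VolumeOptions` field. A hypothetical follow-up use, not part of this diff:

```go
// Hypothetical follow-up, not in this diff: the provisioner can now reach
// the claim's namespace directly through options.PVC.
func claimNamespace(options volume.VolumeOptions) string {
	return options.PVC.Namespace
}
```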
@@ -48,7 +48,7 @@ type hostPathPlugin struct {
 	// decouple creating Recyclers/Deleters/Provisioners by deferring to a function. Allows for easier testing.
 	newRecyclerFunc    func(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder, host volume.VolumeHost, volumeConfig volume.VolumeConfig) (volume.Recycler, error)
 	newDeleterFunc     func(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error)
-	newProvisionerFunc func(options volume.VolumeOptions, host volume.VolumeHost) (volume.Provisioner, error)
+	newProvisionerFunc func(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error)
 	config             volume.VolumeConfig
 }
 
@@ -124,10 +124,7 @@ func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
 	if !plugin.config.ProvisioningEnabled {
 		return nil, fmt.Errorf("Provisioning in volume plugin %q is disabled", plugin.GetPluginName())
 	}
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
-	return plugin.newProvisionerFunc(options, plugin.host)
+	return plugin.newProvisionerFunc(options, plugin.host, plugin)
 }
 
 func (plugin *hostPathPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
@@ -166,8 +163,8 @@ func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
 	return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil
 }
 
-func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost) (volume.Provisioner, error) {
-	return &hostPathProvisioner{options: options, host: host}, nil
+func newProvisioner(options volume.VolumeOptions, host volume.VolumeHost, plugin *hostPathPlugin) (volume.Provisioner, error) {
+	return &hostPathProvisioner{options: options, host: host, plugin: plugin}, nil
 }
 
 // HostPath volumes represent a bare host file or directory mount.
@@ -263,6 +260,7 @@ func (r *hostPathRecycler) Recycle() error {
 type hostPathProvisioner struct {
 	host    volume.VolumeHost
 	options volume.VolumeOptions
+	plugin  *hostPathPlugin
 }
 
 // Create for hostPath simply creates a local /tmp/hostpath_pv/%s directory as a new PersistentVolume.
@@ -270,6 +268,7 @@ type hostPathProvisioner struct {
 func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
 	fullpath := fmt.Sprintf("/tmp/hostpath_pv/%s", uuid.NewUUID())
 
+	capacity := r.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
 	pv := &api.PersistentVolume{
 		ObjectMeta: api.ObjectMeta{
 			Name: r.options.PVName,
@@ -279,9 +278,9 @@ func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: r.options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   r.options.AccessModes,
+			AccessModes:                   r.options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
-				api.ResourceName(api.ResourceStorage): r.options.Capacity,
+				api.ResourceName(api.ResourceStorage): capacity,
 			},
 			PersistentVolumeSource: api.PersistentVolumeSource{
 				HostPath: &api.HostPathVolumeSource{
@@ -290,6 +289,9 @@ func (r *hostPathProvisioner) Provision() (*api.PersistentVolume, error) {
 			},
 		},
 	}
+	if len(r.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = r.plugin.GetAccessModes()
+	}
 
 	return pv, os.MkdirAll(pv.Spec.HostPath.Path, 0750)
 }
@@ -161,7 +161,11 @@ func TestProvisioner(t *testing.T) {
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	creater, err := plug.NewProvisioner(volume.VolumeOptions{Capacity: resource.MustParse("1Gi"), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete})
+	options := volume.VolumeOptions{
+		PVC: volumetest.CreateTestPVC("1Gi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
+		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
+	}
+	creater, err := plug.NewProvisioner(options)
 	if err != nil {
 		t.Errorf("Failed to make a new Provisioner: %v", err)
 	}
@@ -24,8 +24,6 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/api/unversioned"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/types"
@@ -41,25 +39,22 @@ type VolumeOptions struct {
 	// TODO: refactor all of this out of volumes when an admin can configure
 	// many kinds of provisioners.
 
-	// Capacity is the size of a volume.
-	Capacity resource.Quantity
-	// AccessModes of a volume
-	AccessModes []api.PersistentVolumeAccessMode
 	// Reclamation policy for a persistent volume
 	PersistentVolumeReclaimPolicy api.PersistentVolumeReclaimPolicy
 	// PV.Name of the appropriate PersistentVolume. Used to generate cloud
 	// volume name.
 	PVName string
-	// PVC.Name of the PersistentVolumeClaim; only set during dynamic provisioning.
-	PVCName string
+	// PVC is reference to the claim that lead to provisioning of a new PV.
+	// Provisioners *must* create a PV that would be matched by this PVC,
+	// i.e. with required capacity, accessMode, labels matching PVC.Selector and
	// so on.
+	PVC *api.PersistentVolumeClaim
 	// Unique name of Kubernetes cluster.
 	ClusterName string
 	// Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
 	CloudTags *map[string]string
 	// Volume provisioning parameters from StorageClass
 	Parameters map[string]string
-	// Volume selector from PersistentVolumeClaim
-	Selector *unversioned.LabelSelector
 }
 
 // VolumePlugin is an interface to volume plugins that can be used on a
@@ -320,10 +320,6 @@ func (plugin *quobytePlugin) newDeleterInternal(spec *volume.Spec) (volume.Deleter, error) {
 }
 
 func (plugin *quobytePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
-
 	return plugin.newProvisionerInternal(options)
 }
 
@@ -344,7 +340,7 @@ type quobyteVolumeProvisioner struct {
 }
 
 func (provisioner *quobyteVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
-	if provisioner.options.Selector != nil {
+	if provisioner.options.PVC.Spec.Selector != nil {
 		return nil, fmt.Errorf("claim Selector is not supported")
 	}
 	var apiServer, adminSecretName, quobyteUser, quobytePassword string
@@ -416,7 +412,10 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	pv := new(api.PersistentVolume)
 	pv.Spec.PersistentVolumeSource.Quobyte = vol
 	pv.Spec.PersistentVolumeReclaimPolicy = provisioner.options.PersistentVolumeReclaimPolicy
-	pv.Spec.AccessModes = provisioner.options.AccessModes
+	pv.Spec.AccessModes = provisioner.options.PVC.Spec.AccessModes
+	if len(pv.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = provisioner.plugin.GetAccessModes()
+	}
 	pv.Spec.Capacity = api.ResourceList{
 		api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
 	}
@@ -33,7 +33,8 @@ type quobyteVolumeManager struct {
 }
 
 func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner) (quobyte *api.QuobyteVolumeSource, size int, err error) {
-	volumeSize := int(volume.RoundUpSize(provisioner.options.Capacity.Value(), 1024*1024*1024))
+	capacity := provisioner.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))
 	// Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited)
 	// to simulate a size constraint we could set here a Quota
 	volumeRequest := &quobyte_api.CreateVolumeRequest{
@@ -213,9 +213,6 @@ func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret string) (volume.Deleter, error) {
 }
 
 func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options, &RBDUtil{})
 }
 
@@ -237,7 +234,7 @@ type rbdVolumeProvisioner struct {
 }
 
 func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
-	if r.options.Selector != nil {
+	if r.options.PVC.Spec.Selector != nil {
 		return nil, fmt.Errorf("claim Selector is not supported")
 	}
 	var err error
@@ -309,7 +306,10 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	rbd.RadosUser = r.Id
 	pv.Spec.PersistentVolumeSource.RBD = rbd
 	pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
-	pv.Spec.AccessModes = r.options.AccessModes
+	pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
+	if len(pv.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = r.plugin.GetAccessModes()
+	}
 	pv.Spec.Capacity = api.ResourceList{
 		api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)),
 	}
@@ -314,7 +314,8 @@ func (util *RBDUtil) DetachDisk(c rbdUnmounter, mntPath string) error {
 }
 
 func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSource, size int, err error) {
-	volSizeBytes := p.options.Capacity.Value()
+	capacity := p.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	volSizeBytes := capacity.Value()
 	// convert to MB that rbd defaults on
 	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
 	volSz := fmt.Sprintf("%d", sz)
@@ -497,9 +497,9 @@ func (fc *FakeProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: fc.Options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   fc.Options.AccessModes,
+			AccessModes:                   fc.Options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
-				api.ResourceName(api.ResourceStorage): fc.Options.Capacity,
+				api.ResourceName(api.ResourceStorage): fc.Options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
 			},
 			PersistentVolumeSource: api.PersistentVolumeSource{
 				HostPath: &api.HostPathVolumeSource{
@@ -746,3 +746,22 @@ func GetTestVolumePluginMgr(
 
 	return &v.pluginMgr, plugins[0].(*FakeVolumePlugin)
 }
+
+// CreateTestPVC returns a provisionable PVC for tests
+func CreateTestPVC(capacity string, accessModes []api.PersistentVolumeAccessMode) *api.PersistentVolumeClaim {
+	claim := api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name:      "dummy",
+			Namespace: "default",
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			AccessModes: accessModes,
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceName(api.ResourceStorage): resource.MustParse(capacity),
+				},
+			},
+		},
+	}
+	return &claim
+}
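The plugin tests above all switch to this helper; a typical call site, mirroring the updated `TestPlugin` functions:

```go
// Usage as in the updated tests: the PVC now carries the capacity and
// access modes that used to be separate VolumeOptions fields.
options := volume.VolumeOptions{
	PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
	PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
```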
@@ -321,9 +321,6 @@ type vsphereVolumeProvisioner struct {
 var _ volume.Provisioner = &vsphereVolumeProvisioner{}
 
 func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
-	if len(options.AccessModes) == 0 {
-		options.AccessModes = plugin.GetAccessModes()
-	}
 	return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
 }
 
@@ -353,7 +350,7 @@ func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 		},
 		Spec: api.PersistentVolumeSpec{
 			PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
-			AccessModes:                   v.options.AccessModes,
+			AccessModes:                   v.options.PVC.Spec.AccessModes,
 			Capacity: api.ResourceList{
 				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
 			},
@@ -365,6 +362,10 @@ func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 			},
 		},
 	}
+	if len(v.options.PVC.Spec.AccessModes) == 0 {
+		pv.Spec.AccessModes = v.plugin.GetAccessModes()
+	}
+
 	return pv, nil
 }
 
@@ -23,7 +23,6 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/mount"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
@@ -142,12 +141,8 @@ func TestPlugin(t *testing.T) {
 	}
 
 	// Test Provisioner
-	cap := resource.MustParse("100Mi")
 	options := volume.VolumeOptions{
-		Capacity: cap,
-		AccessModes: []api.PersistentVolumeAccessMode{
-			api.ReadWriteOnce,
-		},
+		PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
 		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
 	}
 	provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
@@ -160,7 +155,7 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
 	}
 
-	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
+	cap := persistentSpec.Spec.Capacity[api.ResourceStorage]
 	size := cap.Value()
 	if size != 100*1024 {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
 	"k8s.io/kubernetes/pkg/volume"
@@ -57,7 +58,8 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) {
 		return "", 0, err
 	}
 
-	volSizeBytes := v.options.Capacity.Value()
+	capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+	volSizeBytes := capacity.Value()
 	// vSphere works with kilobytes, convert to KiB with rounding up
 	volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
 	name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
@@ -78,8 +80,8 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, err error) {
 		}
 	}
 
-	// TODO: implement v.options.ProvisionerSelector parsing
-	if v.options.Selector != nil {
+	// TODO: implement PVC.Selector parsing
+	if v.options.PVC.Spec.Selector != nil {
 		return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
 	}
 