Merge pull request #32745 from abrarshivani/vsphere_storage_class

Automatic merge from submit-queue

Support for storage class for vSphere volume plugin. Custom disk format for dynamic provisioning.

This PR does the following:

1. Adds support for storage classes to the vSphere volume plugin.
2. Adds an option for the user to provision disks in different formats. The format choices are
   `thin` (default), `zeroedthick`, and `eagerzeroedthick`.

Sample StorageClass (YAML), followed by a short sketch of how the plugin handles `diskformat`:
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: thin
```
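
A short, self-contained sketch of what the plugin does with `diskformat` (the `resolveDiskFormat` helper and `validDiskFormats` set below are illustrative stand-ins, not code from this PR): an empty value defaults to `thin`, and anything outside the supported set is rejected with an error listing the valid options.

```go
package main

import "fmt"

// Supported user-facing diskformat values, as listed in this PR's description.
var validDiskFormats = map[string]bool{
    "thin":             true,
    "zeroedthick":      true,
    "eagerzeroedthick": true,
}

// resolveDiskFormat is a hypothetical helper (not part of the PR) that mirrors
// the defaulting and validation CreateVolume applies to VolumeOptions.DiskFormat.
func resolveDiskFormat(diskformat string) (string, error) {
    // Default to "thin" when the storage class omits the parameter.
    if diskformat == "" {
        return "thin", nil
    }
    if !validDiskFormats[diskformat] {
        return "", fmt.Errorf("cannot create disk: diskformat %q is invalid; valid options are thin, zeroedthick, eagerzeroedthick", diskformat)
    }
    return diskformat, nil
}

func main() {
    for _, v := range []string{"", "zeroedthick", "fat"} {
        format, err := resolveDiskFormat(v)
        fmt.Printf("diskformat=%q -> %q, err: %v\n", v, format, err)
    }
}
```

With the sample class above, `thin` is passed through unchanged; omitting `diskformat` entirely gives the same result.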
Kubernetes Submit Queue 2016-09-19 01:51:15 -07:00 committed by GitHub
commit 3d82251e92
5 changed files with 100 additions and 20 deletions

@@ -83,6 +83,21 @@ parameters:
* `type`: `pd-standard` or `pd-ssd`. Default: `pd-ssd`
* `zone`: GCE zone. If not specified, a random zone in the same region as controller-manager will be chosen.
#### vSphere
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: slow
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: thin
```
* `diskformat`: `thin`, `zeroedthick` and `eagerzeroedthick`. See vSphere docs for details. Default: `"thin"`.
#### GLUSTERFS
```yaml

@@ -56,6 +56,9 @@ const (
    SCSIDeviceSlots          = 16
    SCSIReservedSlot         = 7
    ThinDiskType             = "thin"
    PreallocatedDiskType     = "preallocated"
    EagerZeroedThickDiskType = "eagerZeroedThick"
    ZeroedThickDiskType      = "zeroedThick"
    VolDir                   = "kubevols"
)
@@ -66,6 +69,17 @@ const (
// TODO: Add support for lsilogic driver type
var supportedSCSIControllerType = []string{strings.ToLower(LSILogicSASControllerType), PVSCSIControllerType}
// Maps user options to API parameters.
// Keeping user options consistent with docker volume plugin for vSphere.
// API: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/vim.VirtualDiskManager.VirtualDiskType.html
var diskFormatValidType = map[string]string{
    ThinDiskType:                              ThinDiskType,
    strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType,
    strings.ToLower(ZeroedThickDiskType):      PreallocatedDiskType,
}
var DiskformatValidOptions = generateDiskFormatValidOptions()
var ErrNoDiskUUIDFound = errors.New("No disk UUID found")
var ErrNoDiskIDFound = errors.New("No vSphere disk ID found")
var ErrNoDevicesFound = errors.New("No devices found")
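
As a side note on the mapping above: the map keys are the lowercased user options, and `zeroedthick` intentionally resolves to the API's `preallocated` disk type (see the VirtualDiskType reference linked in the comment). A tiny, illustrative lookup using the same constant names, not taken from the PR itself:

```go
package main

import (
    "fmt"
    "strings"
)

const (
    ThinDiskType             = "thin"
    PreallocatedDiskType     = "preallocated"
    EagerZeroedThickDiskType = "eagerZeroedThick"
    ZeroedThickDiskType      = "zeroedThick"
)

// Same shape as diskFormatValidType: lowercased user option -> vSphere
// VirtualDiskManager API disk type.
var diskFormatValidType = map[string]string{
    ThinDiskType:                              ThinDiskType,
    strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType,
    strings.ToLower(ZeroedThickDiskType):      PreallocatedDiskType,
}

func main() {
    for _, userOption := range []string{"thin", "zeroedthick", "eagerzeroedthick"} {
        fmt.Printf("storage class %q -> API disk type %q\n",
            userOption, diskFormatValidType[userOption])
    }
}
```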
@@ -126,12 +140,30 @@ type Volumes interface {
    DiskIsAttached(volPath, nodeName string) (bool, error)
    // CreateVolume creates a new vmdk with specified parameters.
    CreateVolume(name string, size int, tags *map[string]string) (volumePath string, err error)
    CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error)
    // DeleteVolume deletes vmdk.
    DeleteVolume(vmDiskPath string) error
}
// VolumeOptions specifies capacity, tags, name and diskFormat for a volume.
type VolumeOptions struct {
    CapacityKB int
    Tags       map[string]string
    Name       string
    DiskFormat string
}
// Generates Valid Options for Diskformat
func generateDiskFormatValidOptions() string {
    validopts := ""
    for diskformat := range diskFormatValidType {
        validopts += (diskformat + ", ")
    }
    validopts = strings.TrimSuffix(validopts, ", ")
    return validopts
}
// Parses vSphere cloud config file and stores it into VSphereConfig.
func readConfig(config io.Reader) (VSphereConfig, error) {
    if config == nil {
@@ -1064,7 +1096,22 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName string) error {
}
// CreateVolume creates a volume of given size (in KiB).
func (vs *VSphere) CreateVolume(name string, size int, tags *map[string]string) (volumePath string, err error) {
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) {
    var diskFormat string
    // Default diskformat as 'thin'
    if volumeOptions.DiskFormat == "" {
        volumeOptions.DiskFormat = ThinDiskType
    }
    if _, ok := diskFormatValidType[volumeOptions.DiskFormat]; !ok {
        return "", fmt.Errorf("Cannot create disk. Error diskformat %+q."+
            " Valid options are %s.", volumeOptions.DiskFormat, DiskformatValidOptions)
    }
    diskFormat = diskFormatValidType[volumeOptions.DiskFormat]
    // Create context
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
@@ -1089,13 +1136,6 @@ func (vs *VSphere) CreateVolume(name string, size int, tags *map[string]string)
        return "", err
    }
    if (*tags)["adapterType"] == "" {
        (*tags)["adapterType"] = LSILogicControllerType
    }
    if (*tags)["diskType"] == "" {
        (*tags)["diskType"] = ThinDiskType
    }
    // vmdks will be created inside kubevols directory
    kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/"
    err = makeDirectoryInDatastore(c, dc, kubeVolsPath, false)
@@ -1103,18 +1143,19 @@ func (vs *VSphere) CreateVolume(name string, size int, tags *map[string]string)
        glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err)
        return "", err
    }
    glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath)
    vmDiskPath := kubeVolsPath + name + ".vmdk"
    vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk"
    // Create a virtual disk manager
    virtualDiskManager := object.NewVirtualDiskManager(c.Client)
    // Create specification for new virtual disk
    vmDiskSpec := &types.FileBackedVirtualDiskSpec{
        VirtualDiskSpec: types.VirtualDiskSpec{
            AdapterType: (*tags)["adapterType"],
            DiskType:    (*tags)["diskType"],
            AdapterType: LSILogicControllerType,
            DiskType:    diskFormat,
        },
        CapacityKb: int64(size),
        CapacityKb: int64(volumeOptions.CapacityKB),
    }
    // Create virtual disk

@@ -222,12 +222,13 @@ func TestVolumes(t *testing.T) {
        t.Fatalf("Instances.List() returned zero servers")
    }
    tags := map[string]string{
        "adapterType": "lsiLogic",
        "diskType":    "thin",
    }
    volumeOptions := &VolumeOptions{
        CapacityKB: 1 * 1024 * 1024,
        Tags:       nil,
        Name:       "kubernetes-test-volume-" + rand.String(10),
        DiskFormat: "thin"}
    volPath, err := vs.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1*1024*1024, &tags)
    volPath, err := vs.CreateVolume(volumeOptions)
    if err != nil {
        t.Fatalf("Cannot create a new VMDK volume: %v", err)
    }

@@ -21,6 +21,7 @@ import (
    "testing"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
    "k8s.io/kubernetes/pkg/volume"
    volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -305,7 +306,7 @@ func (testcase *testcase) DiskIsAttached(diskName, hostName string) (bool, error
    return expected.isAttached, expected.ret
}
func (testcase *testcase) CreateVolume(name string, size int, tags *map[string]string) (volumePath string, err error) {
func (testcase *testcase) CreateVolume(volumeOptions *vsphere.VolumeOptions) (volumePath string, err error) {
    return "", errors.New("Not implemented")
}

@@ -61,7 +61,29 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPa
    // vSphere works with kilobytes, convert to KiB with rounding up
    volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024))
    name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
    vmDiskPath, err = cloud.CreateVolume(name, volSizeKB, v.options.CloudTags)
    volumeOptions := &vsphere.VolumeOptions{
        CapacityKB: volSizeKB,
        Tags:       *v.options.CloudTags,
        Name:       name,
    }
    // Apply Parameters (case-insensitive). We leave validation of
    // the values to the cloud provider.
    for parameter, value := range v.options.Parameters {
        switch strings.ToLower(parameter) {
        case "diskformat":
            volumeOptions.DiskFormat = value
        default:
            return "", 0, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName())
        }
    }
    // TODO: implement v.options.ProvisionerSelector parsing
    if v.options.Selector != nil {
        return "", 0, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
    }
    vmDiskPath, err = cloud.CreateVolume(volumeOptions)
    if err != nil {
        glog.V(2).Infof("Error creating vsphere volume: %v", err)
        return "", 0, err