Merge pull request #60165 from verult/repd-beta

Automatic merge from submit-queue (batch tested with PRs 59286, 59743, 59883, 60190, 60165). If you want to cherry-pick this change to another branch, please follow the instructions <a href="https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md">here</a>.

Updating GCE PD StorageClass parameters

**What this PR does / why we need it**: New parameter to improve support of multi-zone PDs.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*: partially addresses #59988

/cc @msau42 @saad-ali
pull/6/head
Kubernetes Submit Queue 2018-02-23 20:09:41 -08:00 committed by GitHub
commit 976fdde91b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 54 additions and 40 deletions

View File

@ -41,11 +41,13 @@ const (
diskPartitionSuffix = "-part" diskPartitionSuffix = "-part"
diskSDPath = "/dev/sd" diskSDPath = "/dev/sd"
diskSDPattern = "/dev/sd*" diskSDPattern = "/dev/sd*"
regionalPDZonesAuto = "auto" // "replica-zones: auto" means Kubernetes will select zones for RePD
maxChecks = 60
maxRetries = 10 maxRetries = 10
checkSleepDuration = time.Second checkSleepDuration = time.Second
maxRegionalPDZones = 2 maxRegionalPDZones = 2
// Replication type constants must be lower case.
replicationTypeNone = "none"
replicationTypeRegionalPD = "regional-pd"
) )
// These variables are modified only in unit tests and should be constant // These variables are modified only in unit tests and should be constant
@ -85,15 +87,16 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
// GCE PDs are allocated in chunks of GBs (not GiBs) // GCE PDs are allocated in chunks of GBs (not GiBs)
requestGB := volume.RoundUpToGB(capacity) requestGB := volume.RoundUpToGB(capacity)
// Apply Parameters (case-insensitive). We leave validation of // Apply Parameters.
// the values to the cloud provider. // Values for parameter "replication-type" are canonicalized to lower case.
// Values for other parameters are case-insensitive, and we leave validation of these values
// to the cloud provider.
diskType := "" diskType := ""
configuredZone := "" configuredZone := ""
configuredZones := "" configuredZones := ""
configuredReplicaZones := ""
zonePresent := false zonePresent := false
zonesPresent := false zonesPresent := false
replicaZonesPresent := false replicationType := replicationTypeNone
fstype := "" fstype := ""
for k, v := range c.options.Parameters { for k, v := range c.options.Parameters {
switch strings.ToLower(k) { switch strings.ToLower(k) {
@ -105,9 +108,8 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
case "zones": case "zones":
zonesPresent = true zonesPresent = true
configuredZones = v configuredZones = v
case "replica-zones": case "replication-type":
replicaZonesPresent = true replicationType = strings.ToLower(v)
configuredReplicaZones = v
case volume.VolumeParameterFSType: case volume.VolumeParameterFSType:
fstype = v fstype = v
default: default:
@ -115,10 +117,14 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
} }
} }
if ((zonePresent || zonesPresent) && replicaZonesPresent) || if zonePresent && zonesPresent {
(zonePresent && zonesPresent) { return "", 0, nil, "", fmt.Errorf("the 'zone' and 'zones' StorageClass parameters must not be used at the same time")
// 011, 101, 111, 110 }
return "", 0, nil, "", fmt.Errorf("a combination of zone, zones, and replica-zones StorageClass parameters must not be used at the same time")
if replicationType == replicationTypeRegionalPD && zonePresent {
// If a user accidentally types 'zone' instead of 'zones', we want to throw an error
// instead of assuming that 'zones' is empty and proceed by randomly selecting zones.
return "", 0, nil, "", fmt.Errorf("the '%s' replication type does not support the 'zone' parameter; use 'zones' instead", replicationTypeRegionalPD)
} }
// TODO: implement PVC.Selector parsing // TODO: implement PVC.Selector parsing
@ -126,18 +132,13 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE") return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
} }
if !zonePresent && !zonesPresent && replicaZonesPresent { switch replicationType {
// 001 - "replica-zones" specified case replicationTypeRegionalPD:
replicaZones, err := volumeutil.ZonesToSet(configuredReplicaZones)
if err != nil {
return "", 0, nil, "", err
}
err = createRegionalPD( err = createRegionalPD(
name, name,
c.options.PVC.Name, c.options.PVC.Name,
diskType, diskType,
replicaZones, configuredZones,
requestGB, requestGB,
c.options.CloudTags, c.options.CloudTags,
cloud) cloud)
@ -147,10 +148,11 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
} }
glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name) glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name)
} else {
case replicationTypeNone:
var zones sets.String var zones sets.String
if !zonePresent && !zonesPresent { if !zonePresent && !zonesPresent {
// 000 - neither "zone", "zones", or "replica-zones" specified // 00 - neither "zone" or "zones" specified
// Pick a zone randomly selected from all active zones where // Pick a zone randomly selected from all active zones where
// Kubernetes cluster has a node. // Kubernetes cluster has a node.
zones, err = cloud.GetAllCurrentZones() zones, err = cloud.GetAllCurrentZones()
@ -159,13 +161,13 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
return "", 0, nil, "", err return "", 0, nil, "", err
} }
} else if !zonePresent && zonesPresent { } else if !zonePresent && zonesPresent {
// 010 - "zones" specified // 01 - "zones" specified
// Pick a zone randomly selected from specified set. // Pick a zone randomly selected from specified set.
if zones, err = volumeutil.ZonesToSet(configuredZones); err != nil { if zones, err = volumeutil.ZonesToSet(configuredZones); err != nil {
return "", 0, nil, "", err return "", 0, nil, "", err
} }
} else if zonePresent && !zonesPresent { } else if zonePresent && !zonesPresent {
// 100 - "zone" specified // 10 - "zone" specified
// Use specified zone // Use specified zone
if err := volume.ValidateZone(configuredZone); err != nil { if err := volume.ValidateZone(configuredZone); err != nil {
return "", 0, nil, "", err return "", 0, nil, "", err
@ -186,6 +188,9 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
} }
glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name) glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name)
default:
return "", 0, nil, "", fmt.Errorf("replication-type of '%s' is not supported", replicationType)
} }
labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */) labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */)
@ -202,32 +207,41 @@ func createRegionalPD(
diskName string, diskName string,
pvcName string, pvcName string,
diskType string, diskType string,
replicaZones sets.String, zonesString string,
requestGB int64, requestGB int64,
cloudTags *map[string]string, cloudTags *map[string]string,
cloud *gcecloud.GCECloud) error { cloud *gcecloud.GCECloud) error {
autoZoneSelection := false var replicaZones sets.String
if replicaZones.Len() != maxRegionalPDZones { var err error
replicaZonesList := replicaZones.UnsortedList()
if replicaZones.Len() == 1 && replicaZonesList[0] == regionalPDZonesAuto { if zonesString == "" {
// User requested automatic zone selection. // Consider all zones
autoZoneSelection = true replicaZones, err = cloud.GetAllCurrentZones()
} else { if err != nil {
return fmt.Errorf( glog.V(2).Infof("error getting zone information from GCE: %v", err)
"replica-zones specifies %d zones. It must specify %d zones or the keyword \"auto\" to let Kubernetes select zones.", return err
replicaZones.Len(), }
maxRegionalPDZones) } else {
replicaZones, err = volumeutil.ZonesToSet(zonesString)
if err != nil {
return err
} }
} }
selectedReplicaZones := replicaZones zoneCount := replicaZones.Len()
if autoZoneSelection { var selectedReplicaZones sets.String
if zoneCount < maxRegionalPDZones {
return fmt.Errorf("cannot specify only %d zone(s) for Regional PDs.", zoneCount)
} else if zoneCount == maxRegionalPDZones {
selectedReplicaZones = replicaZones
} else {
// Must randomly select zones
selectedReplicaZones = volume.ChooseZonesForVolume( selectedReplicaZones = volume.ChooseZonesForVolume(
replicaZones, pvcName, maxRegionalPDZones) replicaZones, pvcName, maxRegionalPDZones)
} }
if err := cloud.CreateRegionalDisk( if err = cloud.CreateRegionalDisk(
diskName, diskName,
diskType, diskType,
selectedReplicaZones, selectedReplicaZones,