Merge pull request #66464 from wongma7/round-overflow

Automatic merge from submit-queue (batch tested with PRs 66464, 66488). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Avoid overflowing int64 in RoundUpSize and return an error if the result overflows int

**What this PR does / why we need it**:
There are many places in plugins (some I may have missed) where we naively convert a `resource.Quantity.Value()`, which is an `int64`, to an `int`, which may be only 32 bits wide.
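
As a quick illustration of that failure mode (mine, not from the PR), the `int32` cast below stands in for `int` on a 32-bit platform:

```go
package main

import "fmt"

func main() {
	// A byte count that fits in an int64 but not in a 32-bit int.
	var sizeBytes int64 = 5 * 1024 * 1024 * 1024 // 5Gi = 5368709120

	// Forcing the conversion to 32 bits shows the silent wraparound that a
	// plain int(sizeBytes) would produce on a 32-bit platform.
	fmt.Println(int32(sizeBytes)) // 1073741824, not 5368709120

	// The round-trip check used by the new RoundUp*Int helpers catches this:
	// on a 32-bit platform int64(int(sizeBytes)) != sizeBytes.
	if int64(int(sizeBytes)) != sizeBytes {
		fmt.Println("overflow: value does not fit in int")
	}
}
```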

Background, optional to read :): Kubernetes canonicalizes `resource.Quantity` values, and from what I have seen while testing PVC creation, `decimalSI` is the default. If a quantity is in `decimalSI` format and its value in bytes would overflow an int64, e.g. `10E`, nothing happens. If it is in `binarySI` format and its value in bytes would overflow an int64, e.g. `10Ei`, it is clamped to 2^63-1 and the field value itself does not overflow. But overflow may still happen later in the code, which is what this PR addresses.
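
To put numbers on the `10Ei` case (my own arithmetic, not part of the PR): 10Ei is 10 * 2^60 bytes, which already exceeds the largest int64:

```go
package main

import (
	"fmt"
	"math"
	"math/big"
)

func main() {
	// 10Ei = 10 * 2^60 bytes.
	tenEi := new(big.Int).Lsh(big.NewInt(10), 60)
	maxInt64 := big.NewInt(math.MaxInt64) // 2^63 - 1

	fmt.Println(tenEi)                   // 11529215046068469760
	fmt.Println(maxInt64)                // 9223372036854775807
	fmt.Println(tenEi.Cmp(maxInt64) > 0) // true: 10Ei in bytes does not fit in an int64
}
```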

* Change `RoundUpSize` implementation to avoid overflowing `int64`
* Add `RoundUp*Int` functions for use when an `int` is expected instead of an `int64`, because `int` may be only 32 bits wide and naively doing `int($INT64_VALUE)` can silently overflow. These functions return an error if overflow has occurred (see the sketch after this list).
* Rename `*GB` variables to `*GiB` where appropriate for maximum clarity
* Use `RoundUpToGiB` instead of `RoundUpSize` where possible
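
Here is a hypothetical caller sketch (not part of the diff) of how the two flavors are meant to be used; the import paths are the usual ones for this tree, and 1500Mi rounds up to 2 GiB:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	capacity := resource.MustParse("1500Mi")

	// int64 flavor: 1500Mi rounds up to 2 GiB chunks.
	fmt.Println(volumeutil.RoundUpToGiB(capacity)) // 2

	// int flavor: same rounding, but returns an error instead of silently
	// truncating when the result does not fit in a (possibly 32-bit) int.
	sizeGiB, err := volumeutil.RoundUpToGiBInt(capacity)
	if err != nil {
		panic(err) // real callers propagate the error instead of provisioning a wrong size
	}
	fmt.Println(sizeGiB) // 2
}
```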

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**: please review carefully as we don't have e2e tests for most plugins!

**Release note**:

```release-note
NONE
```
Edit: removed 'we do not need to worry about...'. Yes we do, I worded that badly :)
Kubernetes Submit Queue 2018-07-24 19:03:01 -07:00 committed by GitHub
commit 35c3764bbb
14 changed files with 94 additions and 36 deletions


@@ -2510,9 +2510,8 @@ func (c *Cloud) ResizeDisk(
descErr := fmt.Errorf("AWS.ResizeDisk Error describing volume %s with %v", diskName, err)
return oldSize, descErr
}
requestBytes := newSize.Value()
// AWS resizes in chunks of GiB (not GB)
requestGiB := volumeutil.RoundUpSize(requestBytes, 1024*1024*1024)
requestGiB := volumeutil.RoundUpToGiB(newSize)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
// If disk already if of greater or equal size than requested we return


@@ -746,9 +746,8 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity,
return oldSize, err
}
requestBytes := newSize.Value()
// GCE resizes in chunks of GiBs
requestGIB := volumeutil.RoundUpSize(requestBytes, volumeutil.GIB)
requestGIB := volumeutil.RoundUpToGiB(newSize)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGIB))
// If disk is already of size equal or greater than requested size, we simply return


@@ -411,13 +411,15 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne
return oldSize, fmt.Errorf("volume status is not available")
}
volSizeBytes := newSize.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB))
volSizeGiB, err := volumeutil.RoundUpToGiBInt(newSize)
if err != nil {
return oldSize, err
}
newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGiB))
// if volume size equals to or greater than the newSize, return nil
if volume.Size >= volSizeGB {
if volume.Size >= volSizeGiB {
return newSizeQuant, nil
}
@@ -426,7 +428,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne
return oldSize, err
}
err = volumes.expandVolume(volumeID, volSizeGB)
err = volumes.expandVolume(volumeID, volSizeGiB)
if err != nil {
return oldSize, err
}


@@ -83,11 +83,13 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// AWS works with gigabytes, convert to GiB with rounding up
requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024))
requestGiB, err := volumeutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, nil, "", err
}
volumeOptions := &aws.VolumeOptions{
CapacityGB: requestGB,
CapacityGB: requestGiB,
Tags: tags,
PVCName: c.options.PVC.Name,
}
@@ -147,7 +149,7 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K
glog.Errorf("error building labels for new EBS volume %q: %v", name, err)
}
return name, int(requestGB), labels, fstype, nil
return name, requestGiB, labels, fstype, nil
}
// Returns the first path that exists, or empty string if none exist.


@@ -100,8 +100,10 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
// maxLength = 79 - (4 for ".vhd") = 75
name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024))
requestGiB, err := util.RoundUpToGiBInt(capacity)
if err != nil {
return nil, err
}
for k, v := range p.options.Parameters {
switch strings.ToLower(k) {
@@ -157,18 +159,18 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
if p.options.CloudTags != nil {
tags = *(p.options.CloudTags)
}
diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGB, tags)
diskURI, err = diskController.CreateManagedDisk(name, skuName, resourceGroup, requestGiB, tags)
if err != nil {
return nil, err
}
} else {
if kind == v1.AzureDedicatedBlobDisk {
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGB)
_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGiB)
if err != nil {
return nil, err
}
} else {
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB)
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGiB)
if err != nil {
return nil, err
}
@@ -196,7 +198,7 @@ func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
AccessModes: supportedModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{


@@ -169,9 +169,12 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Cinder works with gigabytes, convert to GiB with rounding up
volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
volSizeGiB, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, nil, "", err
}
name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters
vtype := ""
availability := ""
@@ -208,7 +211,7 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
}
}
volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags)
if err != nil {
glog.V(2).Infof("Error creating cinder volume: %v", err)
return "", 0, nil, "", err
@@ -221,7 +224,7 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
}
return volumeID, volSizeGB, volumeLabels, fstype, nil
return volumeID, volSizeGiB, volumeLabels, fstype, nil
}
func probeAttachedVolume() error {


@@ -49,7 +49,7 @@ func (util *FlockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
return d.flockerClient.DeleteDataset(datasetUUID)
}
func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) {
func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) {
if c.flockerClient == nil {
c.flockerClient, err = c.plugin.newFlockerClient("")
@@ -74,7 +74,10 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
volumeSizeGB = int(volutil.RoundUpSize(requestBytes, 1024*1024*1024))
volumeSizeGiB, err = volutil.RoundUpToGiBInt(capacity)
if err != nil {
return
}
createOptions := &flockerapi.CreateDatasetOptions{
MaximumSize: requestBytes,


@@ -744,7 +744,10 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// GlusterFS/heketi creates volumes in units of GiB.
sz := int(volutil.RoundUpToGiB(capacity))
sz, err := volutil.RoundUpToGiBInt(capacity)
if err != nil {
return nil, 0, "", err
}
glog.V(2).Infof("create volume of size %dGiB", sz)
if p.url == "" {


@@ -88,9 +88,11 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd
}
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// PhotonController works with GB, convert to GB with rounding up
volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024))
// PhotonController works with GiB, convert to GiB with rounding up
volSizeGB, err := volumeutil.RoundUpToGiBInt(capacity)
if err != nil {
return "", 0, "", err
}
name := volumeutil.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255)
volumeOptions := &photon.VolumeOptions{
CapacityGB: volSizeGB,


@@ -35,7 +35,10 @@ type quobyteVolumeManager struct {
func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner, createQuota bool) (quobyte *v1.QuobyteVolumeSource, size int, err error) {
capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volumeSize := int(util.RoundUpSize(capacity.Value(), 1024*1024*1024))
volumeSize, err := util.RoundUpToGiBInt(capacity)
if err != nil {
return nil, 0, err
}
// Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited)
// to simulate a size constraint we set here a Quota for logical space
volumeRequest := &quobyteapi.CreateVolumeRequest{


@@ -579,7 +579,10 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Convert to MB that rbd defaults on.
sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024))
sz, err := volutil.RoundUpSizeInt(volSizeBytes, 1024*1024)
if err != nil {
return nil, 0, err
}
volSz := fmt.Sprintf("%d", sz)
mon := util.kernelRBDMonitorsOpt(p.Mon)
if p.rbdMounter.imageFormat == rbdImageFormat2 {


@@ -602,7 +602,11 @@ func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologie
c.labels[k] = v
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
c.sizeGB = int(util.RoundUpSize(capacity.Value(), 1024*1024*1024))
var err error
c.sizeGB, err = util.RoundUpToGiBInt(capacity)
if err != nil {
return nil, err
}
apiCfg, err := parsePVSecret(adminSecretNamespace, adminSecretName, c.plugin.host.GetKubeClient())
if err != nil {


@@ -361,7 +361,11 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
roundedUp := volumeSizeBytes / allocationUnitBytes
if volumeSizeBytes%allocationUnitBytes > 0 {
roundedUp += 1
}
return roundedUp
}
// RoundUpToGB rounds up given quantity to chunks of GB
@@ -376,6 +380,32 @@ func RoundUpToGiB(size resource.Quantity) int64 {
return RoundUpSize(requestBytes, GIB)
}
// RoundUpSizeInt calculates how many allocation units are needed to accommodate
// a volume of given size. It returns an int instead of an int64 and an error if
// there's overflow
func RoundUpSizeInt(volumeSizeBytes int64, allocationUnitBytes int64) (int, error) {
roundedUp := RoundUpSize(volumeSizeBytes, allocationUnitBytes)
roundedUpInt := int(roundedUp)
if int64(roundedUpInt) != roundedUp {
return 0, fmt.Errorf("capacity %v is too great, casting results in integer overflow", roundedUp)
}
return roundedUpInt, nil
}
// RoundUpToGBInt rounds up given quantity to chunks of GB. It returns an
// int instead of an int64 and an error if there's overflow
func RoundUpToGBInt(size resource.Quantity) (int, error) {
requestBytes := size.Value()
return RoundUpSizeInt(requestBytes, GB)
}
// RoundUpToGiBInt rounds up given quantity upto chunks of GiB. It returns an
// int instead of an int64 and an error if there's overflow
func RoundUpToGiBInt(size resource.Quantity) (int, error) {
requestBytes := size.Value()
return RoundUpSizeInt(requestBytes, GIB)
}
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting


@@ -93,10 +93,13 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec
capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// vSphere works with kilobytes, convert to KiB with rounding up
volSizeKB := int(volumeutil.RoundUpSize(volSizeBytes, 1024))
volSizeKiB, err := volumeutil.RoundUpSizeInt(volSizeBytes, 1024)
if err != nil {
return nil, err
}
name := volumeutil.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255)
volumeOptions := &vclib.VolumeOptions{
CapacityKB: volSizeKB,
CapacityKB: volSizeKiB,
Tags: *v.options.CloudTags,
Name: name,
}
@@ -146,7 +149,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec
}
volSpec = &VolumeSpec{
Path: vmDiskPath,
Size: volSizeKB,
Size: volSizeKiB,
Fstype: fstype,
StoragePolicyName: volumeOptions.StoragePolicyName,
StoragePolicyID: volumeOptions.StoragePolicyID,